[llvm] e01df87 - [NVPTX] Test crash introduced by #67073

Fangrui Song via llvm-commits llvm-commits at lists.llvm.org
Sat Sep 23 10:42:07 PDT 2023


Author: Fangrui Song
Date: 2023-09-23T10:42:02-07:00
New Revision: e01df8716a1b2401e8d7bac65d96e0a9ee76f6e4

URL: https://github.com/llvm/llvm-project/commit/e01df8716a1b2401e8d7bac65d96e0a9ee76f6e4
DIFF: https://github.com/llvm/llvm-project/commit/e01df8716a1b2401e8d7bac65d96e0a9ee76f6e4.diff

LOG: [NVPTX] Test crash introduced by #67073

The test is adapted from https://reviews.llvm.org/D46008

Added: 
    llvm/test/CodeGen/NVPTX/unfold-masked-merge-vector-variablemask.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/NVPTX/unfold-masked-merge-vector-variablemask.ll b/llvm/test/CodeGen/NVPTX/unfold-masked-merge-vector-variablemask.ll
new file mode 100644
index 000000000000000..9d60e1992c890c1
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/unfold-masked-merge-vector-variablemask.ll
@@ -0,0 +1,1197 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=nvptx64 -mcpu=sm_80 -mattr=+ptx70 < %s | FileCheck %s
+
+; ============================================================================ ;
+; 8-bit vector width
+; ============================================================================ ;
+
+define <1 x i8> @out_v1i8(<1 x i8> %x, <1 x i8> %y, <1 x i8> %mask) nounwind {
+; CHECK-LABEL: out_v1i8(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<8>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.u8 %rs1, [out_v1i8_param_0];
+; CHECK-NEXT:    ld.param.u8 %rs2, [out_v1i8_param_2];
+; CHECK-NEXT:    and.b16 %rs3, %rs1, %rs2;
+; CHECK-NEXT:    ld.param.u8 %rs4, [out_v1i8_param_1];
+; CHECK-NEXT:    not.b16 %rs5, %rs2;
+; CHECK-NEXT:    and.b16 %rs6, %rs4, %rs5;
+; CHECK-NEXT:    or.b16 %rs7, %rs3, %rs6;
+; CHECK-NEXT:    st.param.b8 [func_retval0+0], %rs7;
+; CHECK-NEXT:    ret;
+  %mx = and <1 x i8> %x, %mask
+  %notmask = xor <1 x i8> %mask, <i8 -1>
+  %my = and <1 x i8> %y, %notmask
+  %r = or <1 x i8> %mx, %my
+  ret <1 x i8> %r
+}
+
+; ============================================================================ ;
+; 16-bit vector width
+; ============================================================================ ;
+
+define <1 x i16> @out_v1i16(<1 x i16> %x, <1 x i16> %y, <1 x i16> %mask) nounwind {
+; CHECK-LABEL: out_v1i16(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<8>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.u16 %rs1, [out_v1i16_param_0];
+; CHECK-NEXT:    ld.param.u16 %rs2, [out_v1i16_param_2];
+; CHECK-NEXT:    and.b16 %rs3, %rs1, %rs2;
+; CHECK-NEXT:    ld.param.u16 %rs4, [out_v1i16_param_1];
+; CHECK-NEXT:    not.b16 %rs5, %rs2;
+; CHECK-NEXT:    and.b16 %rs6, %rs4, %rs5;
+; CHECK-NEXT:    or.b16 %rs7, %rs3, %rs6;
+; CHECK-NEXT:    st.param.b16 [func_retval0+0], %rs7;
+; CHECK-NEXT:    ret;
+  %mx = and <1 x i16> %x, %mask
+  %notmask = xor <1 x i16> %mask, <i16 -1>
+  %my = and <1 x i16> %y, %notmask
+  %r = or <1 x i16> %mx, %my
+  ret <1 x i16> %r
+}
+
+; ============================================================================ ;
+; 32-bit vector width
+; ============================================================================ ;
+
+define <4 x i8> @out_v4i8(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind {
+; CHECK-LABEL: out_v4i8(
+; CHECK:       {
+; CHECK-NEXT:    .local .align 2 .b8 __local_depot2[4];
+; CHECK-NEXT:    .reg .b64 %SP;
+; CHECK-NEXT:    .reg .b64 %SPL;
+; CHECK-NEXT:    .reg .b16 %rs<36>;
+; CHECK-NEXT:    .reg .b32 %r<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    mov.u64 %SPL, __local_depot2;
+; CHECK-NEXT:    cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs1, %rs2, %rs3, %rs4}, [out_v4i8_param_0];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs5, %rs6, %rs7, %rs8}, [out_v4i8_param_2];
+; CHECK-NEXT:    and.b16 %rs9, %rs1, %rs5;
+; CHECK-NEXT:    and.b16 %rs10, %rs2, %rs6;
+; CHECK-NEXT:    and.b16 %rs11, %rs3, %rs7;
+; CHECK-NEXT:    and.b16 %rs12, %rs4, %rs8;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs13, %rs14, %rs15, %rs16}, [out_v4i8_param_1];
+; CHECK-NEXT:    xor.b16 %rs17, %rs8, 255;
+; CHECK-NEXT:    xor.b16 %rs18, %rs7, 255;
+; CHECK-NEXT:    xor.b16 %rs19, %rs6, 255;
+; CHECK-NEXT:    xor.b16 %rs20, %rs5, 255;
+; CHECK-NEXT:    and.b16 %rs21, %rs13, %rs20;
+; CHECK-NEXT:    and.b16 %rs22, %rs14, %rs19;
+; CHECK-NEXT:    and.b16 %rs23, %rs15, %rs18;
+; CHECK-NEXT:    and.b16 %rs24, %rs16, %rs17;
+; CHECK-NEXT:    or.b16 %rs25, %rs12, %rs24;
+; CHECK-NEXT:    or.b16 %rs26, %rs11, %rs23;
+; CHECK-NEXT:    mov.b32 %r1, {%rs26, %rs25};
+; CHECK-NEXT:    mov.b32 {%rs27, %rs28}, %r1;
+; CHECK-NEXT:    st.v2.u8 [%SP+0], {%rs27, %rs28};
+; CHECK-NEXT:    or.b16 %rs29, %rs10, %rs22;
+; CHECK-NEXT:    or.b16 %rs30, %rs9, %rs21;
+; CHECK-NEXT:    mov.b32 %r2, {%rs30, %rs29};
+; CHECK-NEXT:    mov.b32 {%rs31, %rs32}, %r2;
+; CHECK-NEXT:    st.v2.u8 [%SP+2], {%rs31, %rs32};
+; CHECK-NEXT:    ld.u16 %r3, [%SP+0];
+; CHECK-NEXT:    shl.b32 %r4, %r3, 16;
+; CHECK-NEXT:    ld.u16 %r5, [%SP+2];
+; CHECK-NEXT:    or.b32 %r6, %r5, %r4;
+; CHECK-NEXT:    shr.u32 %r7, %r6, 8;
+; CHECK-NEXT:    cvt.u16.u32 %rs33, %r7;
+; CHECK-NEXT:    cvt.u16.u32 %rs34, %r3;
+; CHECK-NEXT:    bfe.s32 %r8, %r3, 8, 8;
+; CHECK-NEXT:    cvt.u16.u32 %rs35, %r8;
+; CHECK-NEXT:    st.param.v4.b8 [func_retval0+0], {%rs31, %rs33, %rs34, %rs35};
+; CHECK-NEXT:    ret;
+  %mx = and <4 x i8> %x, %mask
+  %notmask = xor <4 x i8> %mask, <i8 -1, i8 -1, i8 -1, i8 -1>
+  %my = and <4 x i8> %y, %notmask
+  %r = or <4 x i8> %mx, %my
+  ret <4 x i8> %r
+}
+
+define <4 x i8> @out_v4i8_undef(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind {
+; CHECK-LABEL: out_v4i8_undef(
+; CHECK:       {
+; CHECK-NEXT:    .local .align 2 .b8 __local_depot3[4];
+; CHECK-NEXT:    .reg .b64 %SP;
+; CHECK-NEXT:    .reg .b64 %SPL;
+; CHECK-NEXT:    .reg .b16 %rs<33>;
+; CHECK-NEXT:    .reg .b32 %r<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    mov.u64 %SPL, __local_depot3;
+; CHECK-NEXT:    cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs1, %rs2, %rs3, %rs4}, [out_v4i8_undef_param_0];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs5, %rs6, %rs7, %rs8}, [out_v4i8_undef_param_2];
+; CHECK-NEXT:    and.b16 %rs9, %rs1, %rs5;
+; CHECK-NEXT:    and.b16 %rs10, %rs2, %rs6;
+; CHECK-NEXT:    and.b16 %rs11, %rs3, %rs7;
+; CHECK-NEXT:    and.b16 %rs12, %rs4, %rs8;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs13, %rs14, %rs15, %rs16}, [out_v4i8_undef_param_1];
+; CHECK-NEXT:    xor.b16 %rs17, %rs8, 255;
+; CHECK-NEXT:    xor.b16 %rs18, %rs6, 255;
+; CHECK-NEXT:    xor.b16 %rs19, %rs5, 255;
+; CHECK-NEXT:    and.b16 %rs20, %rs13, %rs19;
+; CHECK-NEXT:    and.b16 %rs21, %rs14, %rs18;
+; CHECK-NEXT:    and.b16 %rs22, %rs16, %rs17;
+; CHECK-NEXT:    or.b16 %rs23, %rs12, %rs22;
+; CHECK-NEXT:    mov.b32 %r1, {%rs11, %rs23};
+; CHECK-NEXT:    mov.b32 {%rs24, %rs25}, %r1;
+; CHECK-NEXT:    st.v2.u8 [%SP+0], {%rs24, %rs25};
+; CHECK-NEXT:    or.b16 %rs26, %rs10, %rs21;
+; CHECK-NEXT:    or.b16 %rs27, %rs9, %rs20;
+; CHECK-NEXT:    mov.b32 %r2, {%rs27, %rs26};
+; CHECK-NEXT:    mov.b32 {%rs28, %rs29}, %r2;
+; CHECK-NEXT:    st.v2.u8 [%SP+2], {%rs28, %rs29};
+; CHECK-NEXT:    ld.u16 %r3, [%SP+0];
+; CHECK-NEXT:    shl.b32 %r4, %r3, 16;
+; CHECK-NEXT:    ld.u16 %r5, [%SP+2];
+; CHECK-NEXT:    or.b32 %r6, %r5, %r4;
+; CHECK-NEXT:    shr.u32 %r7, %r6, 8;
+; CHECK-NEXT:    cvt.u16.u32 %rs30, %r7;
+; CHECK-NEXT:    cvt.u16.u32 %rs31, %r3;
+; CHECK-NEXT:    bfe.s32 %r8, %r3, 8, 8;
+; CHECK-NEXT:    cvt.u16.u32 %rs32, %r8;
+; CHECK-NEXT:    st.param.v4.b8 [func_retval0+0], {%rs28, %rs30, %rs31, %rs32};
+; CHECK-NEXT:    ret;
+  %mx = and <4 x i8> %x, %mask
+  %notmask = xor <4 x i8> %mask, <i8 -1, i8 -1, i8 undef, i8 -1>
+  %my = and <4 x i8> %y, %notmask
+  %r = or <4 x i8> %mx, %my
+  ret <4 x i8> %r
+}
+
+define <2 x i16> @out_v2i16(<2 x i16> %x, <2 x i16> %y, <2 x i16> %mask) nounwind {
+; CHECK-LABEL: out_v2i16(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<13>;
+; CHECK-NEXT:    .reg .b32 %r<8>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.u32 %r1, [out_v2i16_param_2];
+; CHECK-NEXT:    ld.param.u32 %r3, [out_v2i16_param_1];
+; CHECK-NEXT:    ld.param.u32 %r4, [out_v2i16_param_0];
+; CHECK-NEXT:    and.b32 %r5, %r4, %r1;
+; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r1;
+; CHECK-NEXT:    not.b16 %rs3, %rs2;
+; CHECK-NEXT:    not.b16 %rs4, %rs1;
+; CHECK-NEXT:    mov.b32 {%rs5, %rs6}, %r3;
+; CHECK-NEXT:    and.b16 %rs7, %rs5, %rs4;
+; CHECK-NEXT:    and.b16 %rs8, %rs6, %rs3;
+; CHECK-NEXT:    mov.b32 {%rs9, %rs10}, %r5;
+; CHECK-NEXT:    or.b16 %rs11, %rs10, %rs8;
+; CHECK-NEXT:    or.b16 %rs12, %rs9, %rs7;
+; CHECK-NEXT:    mov.b32 %r7, {%rs12, %rs11};
+; CHECK-NEXT:    st.param.b32 [func_retval0+0], %r7;
+; CHECK-NEXT:    ret;
+  %mx = and <2 x i16> %x, %mask
+  %notmask = xor <2 x i16> %mask, <i16 -1, i16 -1>
+  %my = and <2 x i16> %y, %notmask
+  %r = or <2 x i16> %mx, %my
+  ret <2 x i16> %r
+}
+
+define <1 x i32> @out_v1i32(<1 x i32> %x, <1 x i32> %y, <1 x i32> %mask) nounwind {
+; CHECK-LABEL: out_v1i32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<8>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.u32 %r1, [out_v1i32_param_0];
+; CHECK-NEXT:    ld.param.u32 %r2, [out_v1i32_param_2];
+; CHECK-NEXT:    and.b32 %r3, %r1, %r2;
+; CHECK-NEXT:    ld.param.u32 %r4, [out_v1i32_param_1];
+; CHECK-NEXT:    not.b32 %r5, %r2;
+; CHECK-NEXT:    and.b32 %r6, %r4, %r5;
+; CHECK-NEXT:    or.b32 %r7, %r3, %r6;
+; CHECK-NEXT:    st.param.b32 [func_retval0+0], %r7;
+; CHECK-NEXT:    ret;
+  %mx = and <1 x i32> %x, %mask
+  %notmask = xor <1 x i32> %mask, <i32 -1>
+  %my = and <1 x i32> %y, %notmask
+  %r = or <1 x i32> %mx, %my
+  ret <1 x i32> %r
+}
+
+; ============================================================================ ;
+; 64-bit vector width
+; ============================================================================ ;
+
+define <8 x i8> @out_v8i8(<8 x i8> %x, <8 x i8> %y, <8 x i8> %mask) nounwind {
+; CHECK-LABEL: out_v8i8(
+; CHECK:       {
+; CHECK-NEXT:    .local .align 2 .b8 __local_depot6[8];
+; CHECK-NEXT:    .reg .b64 %SP;
+; CHECK-NEXT:    .reg .b64 %SPL;
+; CHECK-NEXT:    .reg .b16 %rs<72>;
+; CHECK-NEXT:    .reg .b32 %r<14>;
+; CHECK-NEXT:    .reg .b64 %rd<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    mov.u64 %SPL, __local_depot6;
+; CHECK-NEXT:    cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs1, %rs2, %rs3, %rs4}, [out_v8i8_param_0];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs5, %rs6, %rs7, %rs8}, [out_v8i8_param_0+4];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs9, %rs10, %rs11, %rs12}, [out_v8i8_param_2+4];
+; CHECK-NEXT:    and.b16 %rs13, %rs5, %rs9;
+; CHECK-NEXT:    and.b16 %rs14, %rs6, %rs10;
+; CHECK-NEXT:    and.b16 %rs15, %rs7, %rs11;
+; CHECK-NEXT:    and.b16 %rs16, %rs8, %rs12;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs17, %rs18, %rs19, %rs20}, [out_v8i8_param_2];
+; CHECK-NEXT:    and.b16 %rs21, %rs1, %rs17;
+; CHECK-NEXT:    and.b16 %rs22, %rs2, %rs18;
+; CHECK-NEXT:    and.b16 %rs23, %rs3, %rs19;
+; CHECK-NEXT:    and.b16 %rs24, %rs4, %rs20;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs25, %rs26, %rs27, %rs28}, [out_v8i8_param_1];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs29, %rs30, %rs31, %rs32}, [out_v8i8_param_1+4];
+; CHECK-NEXT:    xor.b16 %rs33, %rs20, 255;
+; CHECK-NEXT:    xor.b16 %rs34, %rs19, 255;
+; CHECK-NEXT:    xor.b16 %rs35, %rs18, 255;
+; CHECK-NEXT:    xor.b16 %rs36, %rs17, 255;
+; CHECK-NEXT:    xor.b16 %rs37, %rs12, 255;
+; CHECK-NEXT:    xor.b16 %rs38, %rs11, 255;
+; CHECK-NEXT:    xor.b16 %rs39, %rs10, 255;
+; CHECK-NEXT:    xor.b16 %rs40, %rs9, 255;
+; CHECK-NEXT:    and.b16 %rs41, %rs29, %rs40;
+; CHECK-NEXT:    and.b16 %rs42, %rs30, %rs39;
+; CHECK-NEXT:    and.b16 %rs43, %rs31, %rs38;
+; CHECK-NEXT:    and.b16 %rs44, %rs32, %rs37;
+; CHECK-NEXT:    and.b16 %rs45, %rs25, %rs36;
+; CHECK-NEXT:    and.b16 %rs46, %rs26, %rs35;
+; CHECK-NEXT:    and.b16 %rs47, %rs27, %rs34;
+; CHECK-NEXT:    and.b16 %rs48, %rs28, %rs33;
+; CHECK-NEXT:    or.b16 %rs49, %rs24, %rs48;
+; CHECK-NEXT:    or.b16 %rs50, %rs23, %rs47;
+; CHECK-NEXT:    mov.b32 %r1, {%rs50, %rs49};
+; CHECK-NEXT:    mov.b32 {%rs51, %rs52}, %r1;
+; CHECK-NEXT:    st.v2.u8 [%SP+0], {%rs51, %rs52};
+; CHECK-NEXT:    or.b16 %rs53, %rs22, %rs46;
+; CHECK-NEXT:    or.b16 %rs54, %rs21, %rs45;
+; CHECK-NEXT:    mov.b32 %r2, {%rs54, %rs53};
+; CHECK-NEXT:    mov.b32 {%rs55, %rs56}, %r2;
+; CHECK-NEXT:    st.v2.u8 [%SP+2], {%rs55, %rs56};
+; CHECK-NEXT:    or.b16 %rs57, %rs16, %rs44;
+; CHECK-NEXT:    or.b16 %rs58, %rs15, %rs43;
+; CHECK-NEXT:    mov.b32 %r3, {%rs58, %rs57};
+; CHECK-NEXT:    mov.b32 {%rs59, %rs60}, %r3;
+; CHECK-NEXT:    st.v2.u8 [%SP+4], {%rs59, %rs60};
+; CHECK-NEXT:    or.b16 %rs61, %rs14, %rs42;
+; CHECK-NEXT:    or.b16 %rs62, %rs13, %rs41;
+; CHECK-NEXT:    mov.b32 %r4, {%rs62, %rs61};
+; CHECK-NEXT:    mov.b32 {%rs63, %rs64}, %r4;
+; CHECK-NEXT:    st.v2.u8 [%SP+6], {%rs63, %rs64};
+; CHECK-NEXT:    ld.u16 %r5, [%SP+0];
+; CHECK-NEXT:    shl.b32 %r6, %r5, 16;
+; CHECK-NEXT:    ld.u16 %r7, [%SP+2];
+; CHECK-NEXT:    or.b32 %r8, %r7, %r6;
+; CHECK-NEXT:    cvt.u64.u32 %rd1, %r8;
+; CHECK-NEXT:    ld.u16 %r9, [%SP+4];
+; CHECK-NEXT:    shl.b32 %r10, %r9, 16;
+; CHECK-NEXT:    ld.u16 %r11, [%SP+6];
+; CHECK-NEXT:    or.b32 %r12, %r11, %r10;
+; CHECK-NEXT:    cvt.u64.u32 %rd2, %r12;
+; CHECK-NEXT:    shl.b64 %rd3, %rd2, 32;
+; CHECK-NEXT:    or.b64 %rd4, %rd1, %rd3;
+; CHECK-NEXT:    shr.u32 %r13, %r12, 8;
+; CHECK-NEXT:    shr.u64 %rd5, %rd4, 24;
+; CHECK-NEXT:    cvt.u16.u64 %rs65, %rd5;
+; CHECK-NEXT:    shr.u64 %rd6, %rd1, 16;
+; CHECK-NEXT:    cvt.u16.u64 %rs66, %rd6;
+; CHECK-NEXT:    shr.u64 %rd7, %rd1, 8;
+; CHECK-NEXT:    cvt.u16.u64 %rs67, %rd7;
+; CHECK-NEXT:    st.param.v4.b8 [func_retval0+0], {%rs55, %rs67, %rs66, %rs65};
+; CHECK-NEXT:    cvt.u16.u32 %rs68, %r13;
+; CHECK-NEXT:    bfe.s64 %rd8, %rd2, 24, 8;
+; CHECK-NEXT:    cvt.u16.u64 %rs69, %rd8;
+; CHECK-NEXT:    cvt.u16.u32 %rs70, %r9;
+; CHECK-NEXT:    cvt.u16.u32 %rs71, %r11;
+; CHECK-NEXT:    st.param.v4.b8 [func_retval0+4], {%rs71, %rs68, %rs70, %rs69};
+; CHECK-NEXT:    ret;
+  %mx = and <8 x i8> %x, %mask
+  %notmask = xor <8 x i8> %mask, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+  %my = and <8 x i8> %y, %notmask
+  %r = or <8 x i8> %mx, %my
+  ret <8 x i8> %r
+}
+
+define <4 x i16> @out_v4i16(<4 x i16> %x, <4 x i16> %y, <4 x i16> %mask) nounwind {
+; CHECK-LABEL: out_v4i16(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<25>;
+; CHECK-NEXT:    .reg .b32 %r<17>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.u32 {%r1, %r2}, [out_v4i16_param_1];
+; CHECK-NEXT:    ld.param.v2.u32 {%r5, %r6}, [out_v4i16_param_2];
+; CHECK-NEXT:    ld.param.v2.u32 {%r9, %r10}, [out_v4i16_param_0];
+; CHECK-NEXT:    and.b32 %r11, %r9, %r5;
+; CHECK-NEXT:    and.b32 %r13, %r10, %r6;
+; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r6;
+; CHECK-NEXT:    not.b16 %rs3, %rs2;
+; CHECK-NEXT:    not.b16 %rs4, %rs1;
+; CHECK-NEXT:    mov.b32 {%rs5, %rs6}, %r5;
+; CHECK-NEXT:    not.b16 %rs7, %rs6;
+; CHECK-NEXT:    not.b16 %rs8, %rs5;
+; CHECK-NEXT:    mov.b32 {%rs9, %rs10}, %r1;
+; CHECK-NEXT:    and.b16 %rs11, %rs9, %rs8;
+; CHECK-NEXT:    and.b16 %rs12, %rs10, %rs7;
+; CHECK-NEXT:    mov.b32 {%rs13, %rs14}, %r2;
+; CHECK-NEXT:    and.b16 %rs15, %rs13, %rs4;
+; CHECK-NEXT:    and.b16 %rs16, %rs14, %rs3;
+; CHECK-NEXT:    mov.b32 {%rs17, %rs18}, %r13;
+; CHECK-NEXT:    or.b16 %rs19, %rs18, %rs16;
+; CHECK-NEXT:    or.b16 %rs20, %rs17, %rs15;
+; CHECK-NEXT:    mov.b32 %r15, {%rs20, %rs19};
+; CHECK-NEXT:    mov.b32 {%rs21, %rs22}, %r11;
+; CHECK-NEXT:    or.b16 %rs23, %rs22, %rs12;
+; CHECK-NEXT:    or.b16 %rs24, %rs21, %rs11;
+; CHECK-NEXT:    mov.b32 %r16, {%rs24, %rs23};
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0+0], {%r16, %r15};
+; CHECK-NEXT:    ret;
+  %mx = and <4 x i16> %x, %mask
+  %notmask = xor <4 x i16> %mask, <i16 -1, i16 -1, i16 -1, i16 -1>
+  %my = and <4 x i16> %y, %notmask
+  %r = or <4 x i16> %mx, %my
+  ret <4 x i16> %r
+}
+
+define <4 x i16> @out_v4i16_undef(<4 x i16> %x, <4 x i16> %y, <4 x i16> %mask) nounwind {
+; CHECK-LABEL: out_v4i16_undef(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<20>;
+; CHECK-NEXT:    .reg .b32 %r<17>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.u32 {%r1, %r2}, [out_v4i16_undef_param_1];
+; CHECK-NEXT:    ld.param.v2.u32 {%r5, %r6}, [out_v4i16_undef_param_2];
+; CHECK-NEXT:    ld.param.v2.u32 {%r9, %r10}, [out_v4i16_undef_param_0];
+; CHECK-NEXT:    and.b32 %r11, %r10, %r6;
+; CHECK-NEXT:    and.b32 %r13, %r9, %r5;
+; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r5;
+; CHECK-NEXT:    not.b16 %rs3, %rs2;
+; CHECK-NEXT:    not.b16 %rs4, %rs1;
+; CHECK-NEXT:    { .reg .b16 tmp; mov.b32 {tmp, %rs5}, %r6; }
+; CHECK-NEXT:    not.b16 %rs6, %rs5;
+; CHECK-NEXT:    { .reg .b16 tmp; mov.b32 {tmp, %rs7}, %r2; }
+; CHECK-NEXT:    and.b16 %rs8, %rs7, %rs6;
+; CHECK-NEXT:    mov.b32 {%rs9, %rs10}, %r1;
+; CHECK-NEXT:    and.b16 %rs11, %rs9, %rs4;
+; CHECK-NEXT:    and.b16 %rs12, %rs10, %rs3;
+; CHECK-NEXT:    mov.b32 {%rs13, %rs14}, %r13;
+; CHECK-NEXT:    or.b16 %rs15, %rs14, %rs12;
+; CHECK-NEXT:    or.b16 %rs16, %rs13, %rs11;
+; CHECK-NEXT:    mov.b32 %r15, {%rs16, %rs15};
+; CHECK-NEXT:    mov.b32 {%rs17, %rs18}, %r11;
+; CHECK-NEXT:    or.b16 %rs19, %rs18, %rs8;
+; CHECK-NEXT:    mov.b32 %r16, {%rs17, %rs19};
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0+0], {%r15, %r16};
+; CHECK-NEXT:    ret;
+  %mx = and <4 x i16> %x, %mask
+  %notmask = xor <4 x i16> %mask, <i16 -1, i16 -1, i16 undef, i16 -1>
+  %my = and <4 x i16> %y, %notmask
+  %r = or <4 x i16> %mx, %my
+  ret <4 x i16> %r
+}
+
+define <2 x i32> @out_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %mask) nounwind {
+; CHECK-LABEL: out_v2i32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<15>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.u32 {%r1, %r2}, [out_v2i32_param_0];
+; CHECK-NEXT:    ld.param.v2.u32 {%r3, %r4}, [out_v2i32_param_2];
+; CHECK-NEXT:    and.b32 %r5, %r1, %r3;
+; CHECK-NEXT:    and.b32 %r6, %r2, %r4;
+; CHECK-NEXT:    ld.param.v2.u32 {%r7, %r8}, [out_v2i32_param_1];
+; CHECK-NEXT:    not.b32 %r9, %r4;
+; CHECK-NEXT:    not.b32 %r10, %r3;
+; CHECK-NEXT:    and.b32 %r11, %r7, %r10;
+; CHECK-NEXT:    and.b32 %r12, %r8, %r9;
+; CHECK-NEXT:    or.b32 %r13, %r6, %r12;
+; CHECK-NEXT:    or.b32 %r14, %r5, %r11;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0+0], {%r14, %r13};
+; CHECK-NEXT:    ret;
+  %mx = and <2 x i32> %x, %mask
+  %notmask = xor <2 x i32> %mask, <i32 -1, i32 -1>
+  %my = and <2 x i32> %y, %notmask
+  %r = or <2 x i32> %mx, %my
+  ret <2 x i32> %r
+}
+
+define <1 x i64> @out_v1i64(<1 x i64> %x, <1 x i64> %y, <1 x i64> %mask) nounwind {
+; CHECK-LABEL: out_v1i64(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b64 %rd<8>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.u64 %rd1, [out_v1i64_param_0];
+; CHECK-NEXT:    ld.param.u64 %rd2, [out_v1i64_param_2];
+; CHECK-NEXT:    and.b64 %rd3, %rd1, %rd2;
+; CHECK-NEXT:    ld.param.u64 %rd4, [out_v1i64_param_1];
+; CHECK-NEXT:    not.b64 %rd5, %rd2;
+; CHECK-NEXT:    and.b64 %rd6, %rd4, %rd5;
+; CHECK-NEXT:    or.b64 %rd7, %rd3, %rd6;
+; CHECK-NEXT:    st.param.b64 [func_retval0+0], %rd7;
+; CHECK-NEXT:    ret;
+  %mx = and <1 x i64> %x, %mask
+  %notmask = xor <1 x i64> %mask, <i64 -1>
+  %my = and <1 x i64> %y, %notmask
+  %r = or <1 x i64> %mx, %my
+  ret <1 x i64> %r
+}
+
+; ============================================================================ ;
+; 128-bit vector width
+; ============================================================================ ;
+
+define <16 x i8> @out_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %mask) nounwind {
+; CHECK-LABEL: out_v16i8(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<129>;
+; CHECK-NEXT:    .reg .b32 %r<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v4.u8 {%rs1, %rs2, %rs3, %rs4}, [out_v16i8_param_0+12];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs5, %rs6, %rs7, %rs8}, [out_v16i8_param_0+8];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs9, %rs10, %rs11, %rs12}, [out_v16i8_param_0+4];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs13, %rs14, %rs15, %rs16}, [out_v16i8_param_0];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs17, %rs18, %rs19, %rs20}, [out_v16i8_param_2];
+; CHECK-NEXT:    and.b16 %rs21, %rs15, %rs19;
+; CHECK-NEXT:    and.b16 %rs22, %rs16, %rs20;
+; CHECK-NEXT:    and.b16 %rs23, %rs13, %rs17;
+; CHECK-NEXT:    and.b16 %rs24, %rs14, %rs18;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs25, %rs26, %rs27, %rs28}, [out_v16i8_param_2+4];
+; CHECK-NEXT:    and.b16 %rs29, %rs11, %rs27;
+; CHECK-NEXT:    and.b16 %rs30, %rs12, %rs28;
+; CHECK-NEXT:    and.b16 %rs31, %rs9, %rs25;
+; CHECK-NEXT:    and.b16 %rs32, %rs10, %rs26;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs33, %rs34, %rs35, %rs36}, [out_v16i8_param_2+8];
+; CHECK-NEXT:    and.b16 %rs37, %rs7, %rs35;
+; CHECK-NEXT:    and.b16 %rs38, %rs8, %rs36;
+; CHECK-NEXT:    and.b16 %rs39, %rs5, %rs33;
+; CHECK-NEXT:    and.b16 %rs40, %rs6, %rs34;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs41, %rs42, %rs43, %rs44}, [out_v16i8_param_2+12];
+; CHECK-NEXT:    and.b16 %rs45, %rs3, %rs43;
+; CHECK-NEXT:    and.b16 %rs46, %rs4, %rs44;
+; CHECK-NEXT:    and.b16 %rs47, %rs1, %rs41;
+; CHECK-NEXT:    and.b16 %rs48, %rs2, %rs42;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs49, %rs50, %rs51, %rs52}, [out_v16i8_param_1+12];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs53, %rs54, %rs55, %rs56}, [out_v16i8_param_1+8];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs57, %rs58, %rs59, %rs60}, [out_v16i8_param_1+4];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs61, %rs62, %rs63, %rs64}, [out_v16i8_param_1];
+; CHECK-NEXT:    xor.b16 %rs65, %rs42, 255;
+; CHECK-NEXT:    xor.b16 %rs66, %rs41, 255;
+; CHECK-NEXT:    xor.b16 %rs67, %rs44, 255;
+; CHECK-NEXT:    xor.b16 %rs68, %rs43, 255;
+; CHECK-NEXT:    xor.b16 %rs69, %rs34, 255;
+; CHECK-NEXT:    xor.b16 %rs70, %rs33, 255;
+; CHECK-NEXT:    xor.b16 %rs71, %rs36, 255;
+; CHECK-NEXT:    xor.b16 %rs72, %rs35, 255;
+; CHECK-NEXT:    xor.b16 %rs73, %rs26, 255;
+; CHECK-NEXT:    xor.b16 %rs74, %rs25, 255;
+; CHECK-NEXT:    xor.b16 %rs75, %rs28, 255;
+; CHECK-NEXT:    xor.b16 %rs76, %rs27, 255;
+; CHECK-NEXT:    xor.b16 %rs77, %rs18, 255;
+; CHECK-NEXT:    xor.b16 %rs78, %rs17, 255;
+; CHECK-NEXT:    xor.b16 %rs79, %rs20, 255;
+; CHECK-NEXT:    xor.b16 %rs80, %rs19, 255;
+; CHECK-NEXT:    and.b16 %rs81, %rs63, %rs80;
+; CHECK-NEXT:    and.b16 %rs82, %rs64, %rs79;
+; CHECK-NEXT:    and.b16 %rs83, %rs61, %rs78;
+; CHECK-NEXT:    and.b16 %rs84, %rs62, %rs77;
+; CHECK-NEXT:    and.b16 %rs85, %rs59, %rs76;
+; CHECK-NEXT:    and.b16 %rs86, %rs60, %rs75;
+; CHECK-NEXT:    and.b16 %rs87, %rs57, %rs74;
+; CHECK-NEXT:    and.b16 %rs88, %rs58, %rs73;
+; CHECK-NEXT:    and.b16 %rs89, %rs55, %rs72;
+; CHECK-NEXT:    and.b16 %rs90, %rs56, %rs71;
+; CHECK-NEXT:    and.b16 %rs91, %rs53, %rs70;
+; CHECK-NEXT:    and.b16 %rs92, %rs54, %rs69;
+; CHECK-NEXT:    and.b16 %rs93, %rs51, %rs68;
+; CHECK-NEXT:    and.b16 %rs94, %rs52, %rs67;
+; CHECK-NEXT:    and.b16 %rs95, %rs49, %rs66;
+; CHECK-NEXT:    and.b16 %rs96, %rs50, %rs65;
+; CHECK-NEXT:    or.b16 %rs97, %rs48, %rs96;
+; CHECK-NEXT:    or.b16 %rs98, %rs47, %rs95;
+; CHECK-NEXT:    mov.b32 %r1, {%rs98, %rs97};
+; CHECK-NEXT:    or.b16 %rs99, %rs46, %rs94;
+; CHECK-NEXT:    or.b16 %rs100, %rs45, %rs93;
+; CHECK-NEXT:    mov.b32 %r2, {%rs100, %rs99};
+; CHECK-NEXT:    or.b16 %rs101, %rs40, %rs92;
+; CHECK-NEXT:    or.b16 %rs102, %rs39, %rs91;
+; CHECK-NEXT:    mov.b32 %r3, {%rs102, %rs101};
+; CHECK-NEXT:    or.b16 %rs103, %rs38, %rs90;
+; CHECK-NEXT:    or.b16 %rs104, %rs37, %rs89;
+; CHECK-NEXT:    mov.b32 %r4, {%rs104, %rs103};
+; CHECK-NEXT:    or.b16 %rs105, %rs32, %rs88;
+; CHECK-NEXT:    or.b16 %rs106, %rs31, %rs87;
+; CHECK-NEXT:    mov.b32 %r5, {%rs106, %rs105};
+; CHECK-NEXT:    or.b16 %rs107, %rs30, %rs86;
+; CHECK-NEXT:    or.b16 %rs108, %rs29, %rs85;
+; CHECK-NEXT:    mov.b32 %r6, {%rs108, %rs107};
+; CHECK-NEXT:    or.b16 %rs109, %rs24, %rs84;
+; CHECK-NEXT:    or.b16 %rs110, %rs23, %rs83;
+; CHECK-NEXT:    mov.b32 %r7, {%rs110, %rs109};
+; CHECK-NEXT:    or.b16 %rs111, %rs22, %rs82;
+; CHECK-NEXT:    or.b16 %rs112, %rs21, %rs81;
+; CHECK-NEXT:    mov.b32 %r8, {%rs112, %rs111};
+; CHECK-NEXT:    mov.b32 {%rs113, %rs114}, %r8;
+; CHECK-NEXT:    mov.b32 {%rs115, %rs116}, %r7;
+; CHECK-NEXT:    st.param.v4.b8 [func_retval0+0], {%rs115, %rs116, %rs113, %rs114};
+; CHECK-NEXT:    mov.b32 {%rs117, %rs118}, %r6;
+; CHECK-NEXT:    mov.b32 {%rs119, %rs120}, %r5;
+; CHECK-NEXT:    st.param.v4.b8 [func_retval0+4], {%rs119, %rs120, %rs117, %rs118};
+; CHECK-NEXT:    mov.b32 {%rs121, %rs122}, %r4;
+; CHECK-NEXT:    mov.b32 {%rs123, %rs124}, %r3;
+; CHECK-NEXT:    st.param.v4.b8 [func_retval0+8], {%rs123, %rs124, %rs121, %rs122};
+; CHECK-NEXT:    mov.b32 {%rs125, %rs126}, %r2;
+; CHECK-NEXT:    mov.b32 {%rs127, %rs128}, %r1;
+; CHECK-NEXT:    st.param.v4.b8 [func_retval0+12], {%rs127, %rs128, %rs125, %rs126};
+; CHECK-NEXT:    ret;
+  %mx = and <16 x i8> %x, %mask
+  %notmask = xor <16 x i8> %mask, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+  %my = and <16 x i8> %y, %notmask
+  %r = or <16 x i8> %mx, %my
+  ret <16 x i8> %r
+}
+
+define <8 x i16> @out_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %mask) nounwind {
+; CHECK-LABEL: out_v8i16(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<49>;
+; CHECK-NEXT:    .reg .b32 %r<33>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v4.u32 {%r1, %r2, %r3, %r4}, [out_v8i16_param_1];
+; CHECK-NEXT:    ld.param.v4.u32 {%r9, %r10, %r11, %r12}, [out_v8i16_param_2];
+; CHECK-NEXT:    ld.param.v4.u32 {%r17, %r18, %r19, %r20}, [out_v8i16_param_0];
+; CHECK-NEXT:    and.b32 %r21, %r17, %r9;
+; CHECK-NEXT:    and.b32 %r23, %r18, %r10;
+; CHECK-NEXT:    and.b32 %r25, %r19, %r11;
+; CHECK-NEXT:    and.b32 %r27, %r20, %r12;
+; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r12;
+; CHECK-NEXT:    not.b16 %rs3, %rs2;
+; CHECK-NEXT:    not.b16 %rs4, %rs1;
+; CHECK-NEXT:    mov.b32 {%rs5, %rs6}, %r11;
+; CHECK-NEXT:    not.b16 %rs7, %rs6;
+; CHECK-NEXT:    not.b16 %rs8, %rs5;
+; CHECK-NEXT:    mov.b32 {%rs9, %rs10}, %r10;
+; CHECK-NEXT:    not.b16 %rs11, %rs10;
+; CHECK-NEXT:    not.b16 %rs12, %rs9;
+; CHECK-NEXT:    mov.b32 {%rs13, %rs14}, %r9;
+; CHECK-NEXT:    not.b16 %rs15, %rs14;
+; CHECK-NEXT:    not.b16 %rs16, %rs13;
+; CHECK-NEXT:    mov.b32 {%rs17, %rs18}, %r1;
+; CHECK-NEXT:    and.b16 %rs19, %rs17, %rs16;
+; CHECK-NEXT:    and.b16 %rs20, %rs18, %rs15;
+; CHECK-NEXT:    mov.b32 {%rs21, %rs22}, %r2;
+; CHECK-NEXT:    and.b16 %rs23, %rs21, %rs12;
+; CHECK-NEXT:    and.b16 %rs24, %rs22, %rs11;
+; CHECK-NEXT:    mov.b32 {%rs25, %rs26}, %r3;
+; CHECK-NEXT:    and.b16 %rs27, %rs25, %rs8;
+; CHECK-NEXT:    and.b16 %rs28, %rs26, %rs7;
+; CHECK-NEXT:    mov.b32 {%rs29, %rs30}, %r4;
+; CHECK-NEXT:    and.b16 %rs31, %rs29, %rs4;
+; CHECK-NEXT:    and.b16 %rs32, %rs30, %rs3;
+; CHECK-NEXT:    mov.b32 {%rs33, %rs34}, %r27;
+; CHECK-NEXT:    or.b16 %rs35, %rs34, %rs32;
+; CHECK-NEXT:    or.b16 %rs36, %rs33, %rs31;
+; CHECK-NEXT:    mov.b32 %r29, {%rs36, %rs35};
+; CHECK-NEXT:    mov.b32 {%rs37, %rs38}, %r25;
+; CHECK-NEXT:    or.b16 %rs39, %rs38, %rs28;
+; CHECK-NEXT:    or.b16 %rs40, %rs37, %rs27;
+; CHECK-NEXT:    mov.b32 %r30, {%rs40, %rs39};
+; CHECK-NEXT:    mov.b32 {%rs41, %rs42}, %r23;
+; CHECK-NEXT:    or.b16 %rs43, %rs42, %rs24;
+; CHECK-NEXT:    or.b16 %rs44, %rs41, %rs23;
+; CHECK-NEXT:    mov.b32 %r31, {%rs44, %rs43};
+; CHECK-NEXT:    mov.b32 {%rs45, %rs46}, %r21;
+; CHECK-NEXT:    or.b16 %rs47, %rs46, %rs20;
+; CHECK-NEXT:    or.b16 %rs48, %rs45, %rs19;
+; CHECK-NEXT:    mov.b32 %r32, {%rs48, %rs47};
+; CHECK-NEXT:    st.param.v4.b32 [func_retval0+0], {%r32, %r31, %r30, %r29};
+; CHECK-NEXT:    ret;
+  %mx = and <8 x i16> %x, %mask
+  %notmask = xor <8 x i16> %mask, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+  %my = and <8 x i16> %y, %notmask
+  %r = or <8 x i16> %mx, %my
+  ret <8 x i16> %r
+}
+
+define <4 x i32> @out_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) nounwind {
+; CHECK-LABEL: out_v4i32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<29>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v4.u32 {%r1, %r2, %r3, %r4}, [out_v4i32_param_0];
+; CHECK-NEXT:    ld.param.v4.u32 {%r5, %r6, %r7, %r8}, [out_v4i32_param_2];
+; CHECK-NEXT:    and.b32 %r9, %r1, %r5;
+; CHECK-NEXT:    and.b32 %r10, %r2, %r6;
+; CHECK-NEXT:    and.b32 %r11, %r3, %r7;
+; CHECK-NEXT:    and.b32 %r12, %r4, %r8;
+; CHECK-NEXT:    ld.param.v4.u32 {%r13, %r14, %r15, %r16}, [out_v4i32_param_1];
+; CHECK-NEXT:    not.b32 %r17, %r8;
+; CHECK-NEXT:    not.b32 %r18, %r7;
+; CHECK-NEXT:    not.b32 %r19, %r6;
+; CHECK-NEXT:    not.b32 %r20, %r5;
+; CHECK-NEXT:    and.b32 %r21, %r13, %r20;
+; CHECK-NEXT:    and.b32 %r22, %r14, %r19;
+; CHECK-NEXT:    and.b32 %r23, %r15, %r18;
+; CHECK-NEXT:    and.b32 %r24, %r16, %r17;
+; CHECK-NEXT:    or.b32 %r25, %r12, %r24;
+; CHECK-NEXT:    or.b32 %r26, %r11, %r23;
+; CHECK-NEXT:    or.b32 %r27, %r10, %r22;
+; CHECK-NEXT:    or.b32 %r28, %r9, %r21;
+; CHECK-NEXT:    st.param.v4.b32 [func_retval0+0], {%r28, %r27, %r26, %r25};
+; CHECK-NEXT:    ret;
+  %mx = and <4 x i32> %x, %mask
+  %notmask = xor <4 x i32> %mask, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %my = and <4 x i32> %y, %notmask
+  %r = or <4 x i32> %mx, %my
+  ret <4 x i32> %r
+}
+
+define <4 x i32> @out_v4i32_undef(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) nounwind {
+; CHECK-LABEL: out_v4i32_undef(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<26>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v4.u32 {%r1, %r2, %r3, %r4}, [out_v4i32_undef_param_0];
+; CHECK-NEXT:    ld.param.v4.u32 {%r5, %r6, %r7, %r8}, [out_v4i32_undef_param_2];
+; CHECK-NEXT:    and.b32 %r9, %r3, %r7;
+; CHECK-NEXT:    and.b32 %r10, %r1, %r5;
+; CHECK-NEXT:    and.b32 %r11, %r2, %r6;
+; CHECK-NEXT:    and.b32 %r12, %r4, %r8;
+; CHECK-NEXT:    ld.param.v4.u32 {%r13, %r14, %r15, %r16}, [out_v4i32_undef_param_1];
+; CHECK-NEXT:    not.b32 %r17, %r8;
+; CHECK-NEXT:    not.b32 %r18, %r6;
+; CHECK-NEXT:    not.b32 %r19, %r5;
+; CHECK-NEXT:    and.b32 %r20, %r13, %r19;
+; CHECK-NEXT:    and.b32 %r21, %r14, %r18;
+; CHECK-NEXT:    and.b32 %r22, %r16, %r17;
+; CHECK-NEXT:    or.b32 %r23, %r12, %r22;
+; CHECK-NEXT:    or.b32 %r24, %r11, %r21;
+; CHECK-NEXT:    or.b32 %r25, %r10, %r20;
+; CHECK-NEXT:    st.param.v4.b32 [func_retval0+0], {%r25, %r24, %r9, %r23};
+; CHECK-NEXT:    ret;
+  %mx = and <4 x i32> %x, %mask
+  %notmask = xor <4 x i32> %mask, <i32 -1, i32 -1, i32 undef, i32 -1>
+  %my = and <4 x i32> %y, %notmask
+  %r = or <4 x i32> %mx, %my
+  ret <4 x i32> %r
+}
+
+define <2 x i64> @out_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) nounwind {
+; CHECK-LABEL: out_v2i64(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b64 %rd<15>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.u64 {%rd1, %rd2}, [out_v2i64_param_0];
+; CHECK-NEXT:    ld.param.v2.u64 {%rd3, %rd4}, [out_v2i64_param_2];
+; CHECK-NEXT:    and.b64 %rd5, %rd1, %rd3;
+; CHECK-NEXT:    and.b64 %rd6, %rd2, %rd4;
+; CHECK-NEXT:    ld.param.v2.u64 {%rd7, %rd8}, [out_v2i64_param_1];
+; CHECK-NEXT:    not.b64 %rd9, %rd4;
+; CHECK-NEXT:    not.b64 %rd10, %rd3;
+; CHECK-NEXT:    and.b64 %rd11, %rd7, %rd10;
+; CHECK-NEXT:    and.b64 %rd12, %rd8, %rd9;
+; CHECK-NEXT:    or.b64 %rd13, %rd6, %rd12;
+; CHECK-NEXT:    or.b64 %rd14, %rd5, %rd11;
+; CHECK-NEXT:    st.param.v2.b64 [func_retval0+0], {%rd14, %rd13};
+; CHECK-NEXT:    ret;
+; "out" masked-merge pattern: (x & mask) | (y & ~mask), per <2 x i64> lane.
+  %mx = and <2 x i64> %x, %mask
+  %notmask = xor <2 x i64> %mask, <i64 -1, i64 -1>
+  %my = and <2 x i64> %y, %notmask
+  %r = or <2 x i64> %mx, %my
+  ret <2 x i64> %r
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Should be the same as the previous one.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; ============================================================================ ;
+; 8-bit vector width
+; ============================================================================ ;
+
+define <1 x i8> @in_v1i8(<1 x i8> %x, <1 x i8> %y, <1 x i8> %mask) nounwind {
+; CHECK-LABEL: in_v1i8(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.u8 %rs1, [in_v1i8_param_0];
+; CHECK-NEXT:    ld.param.u8 %rs2, [in_v1i8_param_1];
+; CHECK-NEXT:    xor.b16 %rs3, %rs1, %rs2;
+; CHECK-NEXT:    ld.param.u8 %rs4, [in_v1i8_param_2];
+; CHECK-NEXT:    and.b16 %rs5, %rs3, %rs4;
+; CHECK-NEXT:    xor.b16 %rs6, %rs5, %rs2;
+; CHECK-NEXT:    st.param.b8 [func_retval0+0], %rs6;
+; CHECK-NEXT:    ret;
+; "in" masked-merge pattern: ((x ^ y) & mask) ^ y, on a single i8 lane.
+  %n0 = xor <1 x i8> %x, %y
+  %n1 = and <1 x i8> %n0, %mask
+  %r = xor <1 x i8> %n1, %y
+  ret <1 x i8> %r
+}
+
+; ============================================================================ ;
+; 16-bit vector width
+; ============================================================================ ;
+
+define <1 x i16> @in_v1i16(<1 x i16> %x, <1 x i16> %y, <1 x i16> %mask) nounwind {
+; CHECK-LABEL: in_v1i16(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.u16 %rs1, [in_v1i16_param_0];
+; CHECK-NEXT:    ld.param.u16 %rs2, [in_v1i16_param_1];
+; CHECK-NEXT:    xor.b16 %rs3, %rs1, %rs2;
+; CHECK-NEXT:    ld.param.u16 %rs4, [in_v1i16_param_2];
+; CHECK-NEXT:    and.b16 %rs5, %rs3, %rs4;
+; CHECK-NEXT:    xor.b16 %rs6, %rs5, %rs2;
+; CHECK-NEXT:    st.param.b16 [func_retval0+0], %rs6;
+; CHECK-NEXT:    ret;
+; "in" masked-merge pattern: ((x ^ y) & mask) ^ y, on a single i16 lane.
+  %n0 = xor <1 x i16> %x, %y
+  %n1 = and <1 x i16> %n0, %mask
+  %r = xor <1 x i16> %n1, %y
+  ret <1 x i16> %r
+}
+
+; ============================================================================ ;
+; 32-bit vector width
+; ============================================================================ ;
+
+define <4 x i8> @in_v4i8(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind {
+; CHECK-LABEL: in_v4i8(
+; CHECK:       {
+; CHECK-NEXT:    .local .align 2 .b8 __local_depot18[4];
+; CHECK-NEXT:    .reg .b64 %SP;
+; CHECK-NEXT:    .reg .b64 %SPL;
+; CHECK-NEXT:    .reg .b16 %rs<32>;
+; CHECK-NEXT:    .reg .b32 %r<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    mov.u64 %SPL, __local_depot18;
+; CHECK-NEXT:    cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs1, %rs2, %rs3, %rs4}, [in_v4i8_param_0];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs5, %rs6, %rs7, %rs8}, [in_v4i8_param_1];
+; CHECK-NEXT:    xor.b16 %rs9, %rs4, %rs8;
+; CHECK-NEXT:    xor.b16 %rs10, %rs3, %rs7;
+; CHECK-NEXT:    xor.b16 %rs11, %rs2, %rs6;
+; CHECK-NEXT:    xor.b16 %rs12, %rs1, %rs5;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs13, %rs14, %rs15, %rs16}, [in_v4i8_param_2];
+; CHECK-NEXT:    and.b16 %rs17, %rs12, %rs13;
+; CHECK-NEXT:    and.b16 %rs18, %rs11, %rs14;
+; CHECK-NEXT:    and.b16 %rs19, %rs10, %rs15;
+; CHECK-NEXT:    and.b16 %rs20, %rs9, %rs16;
+; CHECK-NEXT:    xor.b16 %rs21, %rs20, %rs8;
+; CHECK-NEXT:    xor.b16 %rs22, %rs19, %rs7;
+; CHECK-NEXT:    mov.b32 %r1, {%rs22, %rs21};
+; CHECK-NEXT:    mov.b32 {%rs23, %rs24}, %r1;
+; CHECK-NEXT:    st.v2.u8 [%SP+0], {%rs23, %rs24};
+; CHECK-NEXT:    xor.b16 %rs25, %rs18, %rs6;
+; CHECK-NEXT:    xor.b16 %rs26, %rs17, %rs5;
+; CHECK-NEXT:    mov.b32 %r2, {%rs26, %rs25};
+; CHECK-NEXT:    mov.b32 {%rs27, %rs28}, %r2;
+; CHECK-NEXT:    st.v2.u8 [%SP+2], {%rs27, %rs28};
+; CHECK-NEXT:    ld.u16 %r3, [%SP+0];
+; CHECK-NEXT:    shl.b32 %r4, %r3, 16;
+; CHECK-NEXT:    ld.u16 %r5, [%SP+2];
+; CHECK-NEXT:    or.b32 %r6, %r5, %r4;
+; CHECK-NEXT:    shr.u32 %r7, %r6, 8;
+; CHECK-NEXT:    cvt.u16.u32 %rs29, %r7;
+; CHECK-NEXT:    cvt.u16.u32 %rs30, %r3;
+; CHECK-NEXT:    bfe.s32 %r8, %r3, 8, 8;
+; CHECK-NEXT:    cvt.u16.u32 %rs31, %r8;
+; CHECK-NEXT:    st.param.v4.b8 [func_retval0+0], {%rs27, %rs29, %rs30, %rs31};
+; CHECK-NEXT:    ret;
+; "in" masked-merge pattern: ((x ^ y) & mask) ^ y over <4 x i8> lanes.
+  %n0 = xor <4 x i8> %x, %y
+  %n1 = and <4 x i8> %n0, %mask
+  %r = xor <4 x i8> %n1, %y
+  ret <4 x i8> %r
+}
+
+define <2 x i16> @in_v2i16(<2 x i16> %x, <2 x i16> %y, <2 x i16> %mask) nounwind {
+; CHECK-LABEL: in_v2i16(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<8>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.u32 %r1, [in_v2i16_param_0];
+; CHECK-NEXT:    ld.param.u32 %r2, [in_v2i16_param_1];
+; CHECK-NEXT:    xor.b32 %r3, %r1, %r2;
+; CHECK-NEXT:    ld.param.u32 %r4, [in_v2i16_param_2];
+; CHECK-NEXT:    and.b32 %r5, %r3, %r4;
+; CHECK-NEXT:    xor.b32 %r6, %r5, %r2;
+; CHECK-NEXT:    st.param.b32 [func_retval0+0], %r6;
+; CHECK-NEXT:    ret;
+; "in" masked-merge pattern: ((x ^ y) & mask) ^ y over <2 x i16> lanes.
+  %n0 = xor <2 x i16> %x, %y
+  %n1 = and <2 x i16> %n0, %mask
+  %r = xor <2 x i16> %n1, %y
+  ret <2 x i16> %r
+}
+
+define <1 x i32> @in_v1i32(<1 x i32> %x, <1 x i32> %y, <1 x i32> %mask) nounwind {
+; CHECK-LABEL: in_v1i32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.u32 %r1, [in_v1i32_param_0];
+; CHECK-NEXT:    ld.param.u32 %r2, [in_v1i32_param_1];
+; CHECK-NEXT:    xor.b32 %r3, %r1, %r2;
+; CHECK-NEXT:    ld.param.u32 %r4, [in_v1i32_param_2];
+; CHECK-NEXT:    and.b32 %r5, %r3, %r4;
+; CHECK-NEXT:    xor.b32 %r6, %r5, %r2;
+; CHECK-NEXT:    st.param.b32 [func_retval0+0], %r6;
+; CHECK-NEXT:    ret;
+; "in" masked-merge pattern: ((x ^ y) & mask) ^ y, on a single i32 lane.
+  %n0 = xor <1 x i32> %x, %y
+  %n1 = and <1 x i32> %n0, %mask
+  %r = xor <1 x i32> %n1, %y
+  ret <1 x i32> %r
+}
+
+; ============================================================================ ;
+; 64-bit vector width
+; ============================================================================ ;
+
+define <8 x i8> @in_v8i8(<8 x i8> %x, <8 x i8> %y, <8 x i8> %mask) nounwind {
+; CHECK-LABEL: in_v8i8(
+; CHECK:       {
+; CHECK-NEXT:    .local .align 2 .b8 __local_depot21[8];
+; CHECK-NEXT:    .reg .b64 %SP;
+; CHECK-NEXT:    .reg .b64 %SPL;
+; CHECK-NEXT:    .reg .b16 %rs<64>;
+; CHECK-NEXT:    .reg .b32 %r<14>;
+; CHECK-NEXT:    .reg .b64 %rd<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    mov.u64 %SPL, __local_depot21;
+; CHECK-NEXT:    cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs1, %rs2, %rs3, %rs4}, [in_v8i8_param_0+4];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs5, %rs6, %rs7, %rs8}, [in_v8i8_param_0];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs9, %rs10, %rs11, %rs12}, [in_v8i8_param_1];
+; CHECK-NEXT:    xor.b16 %rs13, %rs8, %rs12;
+; CHECK-NEXT:    xor.b16 %rs14, %rs7, %rs11;
+; CHECK-NEXT:    xor.b16 %rs15, %rs6, %rs10;
+; CHECK-NEXT:    xor.b16 %rs16, %rs5, %rs9;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs17, %rs18, %rs19, %rs20}, [in_v8i8_param_1+4];
+; CHECK-NEXT:    xor.b16 %rs21, %rs4, %rs20;
+; CHECK-NEXT:    xor.b16 %rs22, %rs3, %rs19;
+; CHECK-NEXT:    xor.b16 %rs23, %rs2, %rs18;
+; CHECK-NEXT:    xor.b16 %rs24, %rs1, %rs17;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs25, %rs26, %rs27, %rs28}, [in_v8i8_param_2+4];
+; CHECK-NEXT:    and.b16 %rs29, %rs24, %rs25;
+; CHECK-NEXT:    and.b16 %rs30, %rs23, %rs26;
+; CHECK-NEXT:    and.b16 %rs31, %rs22, %rs27;
+; CHECK-NEXT:    and.b16 %rs32, %rs21, %rs28;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs33, %rs34, %rs35, %rs36}, [in_v8i8_param_2];
+; CHECK-NEXT:    and.b16 %rs37, %rs16, %rs33;
+; CHECK-NEXT:    and.b16 %rs38, %rs15, %rs34;
+; CHECK-NEXT:    and.b16 %rs39, %rs14, %rs35;
+; CHECK-NEXT:    and.b16 %rs40, %rs13, %rs36;
+; CHECK-NEXT:    xor.b16 %rs41, %rs40, %rs12;
+; CHECK-NEXT:    xor.b16 %rs42, %rs39, %rs11;
+; CHECK-NEXT:    mov.b32 %r1, {%rs42, %rs41};
+; CHECK-NEXT:    mov.b32 {%rs43, %rs44}, %r1;
+; CHECK-NEXT:    st.v2.u8 [%SP+0], {%rs43, %rs44};
+; CHECK-NEXT:    xor.b16 %rs45, %rs38, %rs10;
+; CHECK-NEXT:    xor.b16 %rs46, %rs37, %rs9;
+; CHECK-NEXT:    mov.b32 %r2, {%rs46, %rs45};
+; CHECK-NEXT:    mov.b32 {%rs47, %rs48}, %r2;
+; CHECK-NEXT:    st.v2.u8 [%SP+2], {%rs47, %rs48};
+; CHECK-NEXT:    xor.b16 %rs49, %rs32, %rs20;
+; CHECK-NEXT:    xor.b16 %rs50, %rs31, %rs19;
+; CHECK-NEXT:    mov.b32 %r3, {%rs50, %rs49};
+; CHECK-NEXT:    mov.b32 {%rs51, %rs52}, %r3;
+; CHECK-NEXT:    st.v2.u8 [%SP+4], {%rs51, %rs52};
+; CHECK-NEXT:    xor.b16 %rs53, %rs30, %rs18;
+; CHECK-NEXT:    xor.b16 %rs54, %rs29, %rs17;
+; CHECK-NEXT:    mov.b32 %r4, {%rs54, %rs53};
+; CHECK-NEXT:    mov.b32 {%rs55, %rs56}, %r4;
+; CHECK-NEXT:    st.v2.u8 [%SP+6], {%rs55, %rs56};
+; CHECK-NEXT:    ld.u16 %r5, [%SP+0];
+; CHECK-NEXT:    shl.b32 %r6, %r5, 16;
+; CHECK-NEXT:    ld.u16 %r7, [%SP+2];
+; CHECK-NEXT:    or.b32 %r8, %r7, %r6;
+; CHECK-NEXT:    cvt.u64.u32 %rd1, %r8;
+; CHECK-NEXT:    ld.u16 %r9, [%SP+4];
+; CHECK-NEXT:    shl.b32 %r10, %r9, 16;
+; CHECK-NEXT:    ld.u16 %r11, [%SP+6];
+; CHECK-NEXT:    or.b32 %r12, %r11, %r10;
+; CHECK-NEXT:    cvt.u64.u32 %rd2, %r12;
+; CHECK-NEXT:    shl.b64 %rd3, %rd2, 32;
+; CHECK-NEXT:    or.b64 %rd4, %rd1, %rd3;
+; CHECK-NEXT:    shr.u32 %r13, %r12, 8;
+; CHECK-NEXT:    shr.u64 %rd5, %rd4, 24;
+; CHECK-NEXT:    cvt.u16.u64 %rs57, %rd5;
+; CHECK-NEXT:    shr.u64 %rd6, %rd1, 16;
+; CHECK-NEXT:    cvt.u16.u64 %rs58, %rd6;
+; CHECK-NEXT:    shr.u64 %rd7, %rd1, 8;
+; CHECK-NEXT:    cvt.u16.u64 %rs59, %rd7;
+; CHECK-NEXT:    st.param.v4.b8 [func_retval0+0], {%rs47, %rs59, %rs58, %rs57};
+; CHECK-NEXT:    cvt.u16.u32 %rs60, %r13;
+; CHECK-NEXT:    bfe.s64 %rd8, %rd2, 24, 8;
+; CHECK-NEXT:    cvt.u16.u64 %rs61, %rd8;
+; CHECK-NEXT:    cvt.u16.u32 %rs62, %r9;
+; CHECK-NEXT:    cvt.u16.u32 %rs63, %r11;
+; CHECK-NEXT:    st.param.v4.b8 [func_retval0+4], {%rs63, %rs60, %rs62, %rs61};
+; CHECK-NEXT:    ret;
+; "in" masked-merge pattern: ((x ^ y) & mask) ^ y over <8 x i8> lanes.
+  %n0 = xor <8 x i8> %x, %y
+  %n1 = and <8 x i8> %n0, %mask
+  %r = xor <8 x i8> %n1, %y
+  ret <8 x i8> %r
+}
+
+define <4 x i16> @in_v4i16(<4 x i16> %x, <4 x i16> %y, <4 x i16> %mask) nounwind {
+; CHECK-LABEL: in_v4i16(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<15>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.u32 {%r1, %r2}, [in_v4i16_param_0];
+; CHECK-NEXT:    ld.param.v2.u32 {%r3, %r4}, [in_v4i16_param_1];
+; CHECK-NEXT:    xor.b32 %r5, %r2, %r4;
+; CHECK-NEXT:    xor.b32 %r6, %r1, %r3;
+; CHECK-NEXT:    ld.param.v2.u32 {%r7, %r8}, [in_v4i16_param_2];
+; CHECK-NEXT:    and.b32 %r9, %r6, %r7;
+; CHECK-NEXT:    and.b32 %r10, %r5, %r8;
+; CHECK-NEXT:    xor.b32 %r11, %r10, %r4;
+; CHECK-NEXT:    xor.b32 %r13, %r9, %r3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0+0], {%r13, %r11};
+; CHECK-NEXT:    ret;
+; "in" masked-merge pattern: ((x ^ y) & mask) ^ y over <4 x i16> lanes.
+  %n0 = xor <4 x i16> %x, %y
+  %n1 = and <4 x i16> %n0, %mask
+  %r = xor <4 x i16> %n1, %y
+  ret <4 x i16> %r
+}
+
+define <2 x i32> @in_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %mask) nounwind {
+; CHECK-LABEL: in_v2i32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<13>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.u32 {%r1, %r2}, [in_v2i32_param_0];
+; CHECK-NEXT:    ld.param.v2.u32 {%r3, %r4}, [in_v2i32_param_1];
+; CHECK-NEXT:    xor.b32 %r5, %r2, %r4;
+; CHECK-NEXT:    xor.b32 %r6, %r1, %r3;
+; CHECK-NEXT:    ld.param.v2.u32 {%r7, %r8}, [in_v2i32_param_2];
+; CHECK-NEXT:    and.b32 %r9, %r6, %r7;
+; CHECK-NEXT:    and.b32 %r10, %r5, %r8;
+; CHECK-NEXT:    xor.b32 %r11, %r10, %r4;
+; CHECK-NEXT:    xor.b32 %r12, %r9, %r3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0+0], {%r12, %r11};
+; CHECK-NEXT:    ret;
+; "in" masked-merge pattern: ((x ^ y) & mask) ^ y over <2 x i32> lanes.
+  %n0 = xor <2 x i32> %x, %y
+  %n1 = and <2 x i32> %n0, %mask
+  %r = xor <2 x i32> %n1, %y
+  ret <2 x i32> %r
+}
+
+define <1 x i64> @in_v1i64(<1 x i64> %x, <1 x i64> %y, <1 x i64> %mask) nounwind {
+; CHECK-LABEL: in_v1i64(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b64 %rd<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.u64 %rd1, [in_v1i64_param_0];
+; CHECK-NEXT:    ld.param.u64 %rd2, [in_v1i64_param_1];
+; CHECK-NEXT:    xor.b64 %rd3, %rd1, %rd2;
+; CHECK-NEXT:    ld.param.u64 %rd4, [in_v1i64_param_2];
+; CHECK-NEXT:    and.b64 %rd5, %rd3, %rd4;
+; CHECK-NEXT:    xor.b64 %rd6, %rd5, %rd2;
+; CHECK-NEXT:    st.param.b64 [func_retval0+0], %rd6;
+; CHECK-NEXT:    ret;
+; "in" masked-merge pattern: ((x ^ y) & mask) ^ y, on a single i64 lane.
+  %n0 = xor <1 x i64> %x, %y
+  %n1 = and <1 x i64> %n0, %mask
+  %r = xor <1 x i64> %n1, %y
+  ret <1 x i64> %r
+}
+
+; ============================================================================ ;
+; 128-bit vector width
+; ============================================================================ ;
+
+define <16 x i8> @in_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %mask) nounwind {
+; CHECK-LABEL: in_v16i8(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<113>;
+; CHECK-NEXT:    .reg .b32 %r<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v4.u8 {%rs1, %rs2, %rs3, %rs4}, [in_v16i8_param_0];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs5, %rs6, %rs7, %rs8}, [in_v16i8_param_0+4];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs9, %rs10, %rs11, %rs12}, [in_v16i8_param_0+8];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs13, %rs14, %rs15, %rs16}, [in_v16i8_param_0+12];
+; CHECK-NEXT:    ld.param.v4.u8 {%rs17, %rs18, %rs19, %rs20}, [in_v16i8_param_1+12];
+; CHECK-NEXT:    xor.b16 %rs21, %rs14, %rs18;
+; CHECK-NEXT:    xor.b16 %rs22, %rs13, %rs17;
+; CHECK-NEXT:    xor.b16 %rs23, %rs16, %rs20;
+; CHECK-NEXT:    xor.b16 %rs24, %rs15, %rs19;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs25, %rs26, %rs27, %rs28}, [in_v16i8_param_1+8];
+; CHECK-NEXT:    xor.b16 %rs29, %rs10, %rs26;
+; CHECK-NEXT:    xor.b16 %rs30, %rs9, %rs25;
+; CHECK-NEXT:    xor.b16 %rs31, %rs12, %rs28;
+; CHECK-NEXT:    xor.b16 %rs32, %rs11, %rs27;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs33, %rs34, %rs35, %rs36}, [in_v16i8_param_1+4];
+; CHECK-NEXT:    xor.b16 %rs37, %rs6, %rs34;
+; CHECK-NEXT:    xor.b16 %rs38, %rs5, %rs33;
+; CHECK-NEXT:    xor.b16 %rs39, %rs8, %rs36;
+; CHECK-NEXT:    xor.b16 %rs40, %rs7, %rs35;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs41, %rs42, %rs43, %rs44}, [in_v16i8_param_1];
+; CHECK-NEXT:    xor.b16 %rs45, %rs2, %rs42;
+; CHECK-NEXT:    xor.b16 %rs46, %rs1, %rs41;
+; CHECK-NEXT:    xor.b16 %rs47, %rs4, %rs44;
+; CHECK-NEXT:    xor.b16 %rs48, %rs3, %rs43;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs49, %rs50, %rs51, %rs52}, [in_v16i8_param_2];
+; CHECK-NEXT:    and.b16 %rs53, %rs48, %rs51;
+; CHECK-NEXT:    and.b16 %rs54, %rs47, %rs52;
+; CHECK-NEXT:    and.b16 %rs55, %rs46, %rs49;
+; CHECK-NEXT:    and.b16 %rs56, %rs45, %rs50;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs57, %rs58, %rs59, %rs60}, [in_v16i8_param_2+4];
+; CHECK-NEXT:    and.b16 %rs61, %rs40, %rs59;
+; CHECK-NEXT:    and.b16 %rs62, %rs39, %rs60;
+; CHECK-NEXT:    and.b16 %rs63, %rs38, %rs57;
+; CHECK-NEXT:    and.b16 %rs64, %rs37, %rs58;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs65, %rs66, %rs67, %rs68}, [in_v16i8_param_2+8];
+; CHECK-NEXT:    and.b16 %rs69, %rs32, %rs67;
+; CHECK-NEXT:    and.b16 %rs70, %rs31, %rs68;
+; CHECK-NEXT:    and.b16 %rs71, %rs30, %rs65;
+; CHECK-NEXT:    and.b16 %rs72, %rs29, %rs66;
+; CHECK-NEXT:    ld.param.v4.u8 {%rs73, %rs74, %rs75, %rs76}, [in_v16i8_param_2+12];
+; CHECK-NEXT:    and.b16 %rs77, %rs24, %rs75;
+; CHECK-NEXT:    and.b16 %rs78, %rs23, %rs76;
+; CHECK-NEXT:    and.b16 %rs79, %rs22, %rs73;
+; CHECK-NEXT:    and.b16 %rs80, %rs21, %rs74;
+; CHECK-NEXT:    xor.b16 %rs81, %rs80, %rs18;
+; CHECK-NEXT:    xor.b16 %rs82, %rs79, %rs17;
+; CHECK-NEXT:    mov.b32 %r1, {%rs82, %rs81};
+; CHECK-NEXT:    xor.b16 %rs83, %rs78, %rs20;
+; CHECK-NEXT:    xor.b16 %rs84, %rs77, %rs19;
+; CHECK-NEXT:    mov.b32 %r2, {%rs84, %rs83};
+; CHECK-NEXT:    xor.b16 %rs85, %rs72, %rs26;
+; CHECK-NEXT:    xor.b16 %rs86, %rs71, %rs25;
+; CHECK-NEXT:    mov.b32 %r3, {%rs86, %rs85};
+; CHECK-NEXT:    xor.b16 %rs87, %rs70, %rs28;
+; CHECK-NEXT:    xor.b16 %rs88, %rs69, %rs27;
+; CHECK-NEXT:    mov.b32 %r4, {%rs88, %rs87};
+; CHECK-NEXT:    xor.b16 %rs89, %rs64, %rs34;
+; CHECK-NEXT:    xor.b16 %rs90, %rs63, %rs33;
+; CHECK-NEXT:    mov.b32 %r5, {%rs90, %rs89};
+; CHECK-NEXT:    xor.b16 %rs91, %rs62, %rs36;
+; CHECK-NEXT:    xor.b16 %rs92, %rs61, %rs35;
+; CHECK-NEXT:    mov.b32 %r6, {%rs92, %rs91};
+; CHECK-NEXT:    xor.b16 %rs93, %rs56, %rs42;
+; CHECK-NEXT:    xor.b16 %rs94, %rs55, %rs41;
+; CHECK-NEXT:    mov.b32 %r7, {%rs94, %rs93};
+; CHECK-NEXT:    xor.b16 %rs95, %rs54, %rs44;
+; CHECK-NEXT:    xor.b16 %rs96, %rs53, %rs43;
+; CHECK-NEXT:    mov.b32 %r8, {%rs96, %rs95};
+; CHECK-NEXT:    mov.b32 {%rs97, %rs98}, %r8;
+; CHECK-NEXT:    mov.b32 {%rs99, %rs100}, %r7;
+; CHECK-NEXT:    st.param.v4.b8 [func_retval0+0], {%rs99, %rs100, %rs97, %rs98};
+; CHECK-NEXT:    mov.b32 {%rs101, %rs102}, %r6;
+; CHECK-NEXT:    mov.b32 {%rs103, %rs104}, %r5;
+; CHECK-NEXT:    st.param.v4.b8 [func_retval0+4], {%rs103, %rs104, %rs101, %rs102};
+; CHECK-NEXT:    mov.b32 {%rs105, %rs106}, %r4;
+; CHECK-NEXT:    mov.b32 {%rs107, %rs108}, %r3;
+; CHECK-NEXT:    st.param.v4.b8 [func_retval0+8], {%rs107, %rs108, %rs105, %rs106};
+; CHECK-NEXT:    mov.b32 {%rs109, %rs110}, %r2;
+; CHECK-NEXT:    mov.b32 {%rs111, %rs112}, %r1;
+; CHECK-NEXT:    st.param.v4.b8 [func_retval0+12], {%rs111, %rs112, %rs109, %rs110};
+; CHECK-NEXT:    ret;
+; "in" masked-merge pattern: ((x ^ y) & mask) ^ y over <16 x i8> lanes.
+  %n0 = xor <16 x i8> %x, %y
+  %n1 = and <16 x i8> %n0, %mask
+  %r = xor <16 x i8> %n1, %y
+  ret <16 x i8> %r
+}
+
+define <8 x i16> @in_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %mask) nounwind {
+; CHECK-LABEL: in_v8i16(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<29>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v4.u32 {%r1, %r2, %r3, %r4}, [in_v8i16_param_0];
+; CHECK-NEXT:    ld.param.v4.u32 {%r5, %r6, %r7, %r8}, [in_v8i16_param_1];
+; CHECK-NEXT:    xor.b32 %r9, %r4, %r8;
+; CHECK-NEXT:    xor.b32 %r10, %r3, %r7;
+; CHECK-NEXT:    xor.b32 %r11, %r2, %r6;
+; CHECK-NEXT:    xor.b32 %r12, %r1, %r5;
+; CHECK-NEXT:    ld.param.v4.u32 {%r13, %r14, %r15, %r16}, [in_v8i16_param_2];
+; CHECK-NEXT:    and.b32 %r17, %r12, %r13;
+; CHECK-NEXT:    and.b32 %r18, %r11, %r14;
+; CHECK-NEXT:    and.b32 %r19, %r10, %r15;
+; CHECK-NEXT:    and.b32 %r20, %r9, %r16;
+; CHECK-NEXT:    xor.b32 %r21, %r20, %r8;
+; CHECK-NEXT:    xor.b32 %r23, %r19, %r7;
+; CHECK-NEXT:    xor.b32 %r25, %r18, %r6;
+; CHECK-NEXT:    xor.b32 %r27, %r17, %r5;
+; CHECK-NEXT:    st.param.v4.b32 [func_retval0+0], {%r27, %r25, %r23, %r21};
+; CHECK-NEXT:    ret;
+; "in" masked-merge pattern: ((x ^ y) & mask) ^ y over <8 x i16> lanes.
+  %n0 = xor <8 x i16> %x, %y
+  %n1 = and <8 x i16> %n0, %mask
+  %r = xor <8 x i16> %n1, %y
+  ret <8 x i16> %r
+}
+
+define <4 x i32> @in_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) nounwind {
+; CHECK-LABEL: in_v4i32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<25>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v4.u32 {%r1, %r2, %r3, %r4}, [in_v4i32_param_0];
+; CHECK-NEXT:    ld.param.v4.u32 {%r5, %r6, %r7, %r8}, [in_v4i32_param_1];
+; CHECK-NEXT:    xor.b32 %r9, %r4, %r8;
+; CHECK-NEXT:    xor.b32 %r10, %r3, %r7;
+; CHECK-NEXT:    xor.b32 %r11, %r2, %r6;
+; CHECK-NEXT:    xor.b32 %r12, %r1, %r5;
+; CHECK-NEXT:    ld.param.v4.u32 {%r13, %r14, %r15, %r16}, [in_v4i32_param_2];
+; CHECK-NEXT:    and.b32 %r17, %r12, %r13;
+; CHECK-NEXT:    and.b32 %r18, %r11, %r14;
+; CHECK-NEXT:    and.b32 %r19, %r10, %r15;
+; CHECK-NEXT:    and.b32 %r20, %r9, %r16;
+; CHECK-NEXT:    xor.b32 %r21, %r20, %r8;
+; CHECK-NEXT:    xor.b32 %r22, %r19, %r7;
+; CHECK-NEXT:    xor.b32 %r23, %r18, %r6;
+; CHECK-NEXT:    xor.b32 %r24, %r17, %r5;
+; CHECK-NEXT:    st.param.v4.b32 [func_retval0+0], {%r24, %r23, %r22, %r21};
+; CHECK-NEXT:    ret;
+; "in" masked-merge pattern: ((x ^ y) & mask) ^ y over <4 x i32> lanes.
+  %n0 = xor <4 x i32> %x, %y
+  %n1 = and <4 x i32> %n0, %mask
+  %r = xor <4 x i32> %n1, %y
+  ret <4 x i32> %r
+}
+
+define <2 x i64> @in_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) nounwind {
+; CHECK-LABEL: in_v2i64(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b64 %rd<13>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.u64 {%rd1, %rd2}, [in_v2i64_param_0];
+; CHECK-NEXT:    ld.param.v2.u64 {%rd3, %rd4}, [in_v2i64_param_1];
+; CHECK-NEXT:    xor.b64 %rd5, %rd2, %rd4;
+; CHECK-NEXT:    xor.b64 %rd6, %rd1, %rd3;
+; CHECK-NEXT:    ld.param.v2.u64 {%rd7, %rd8}, [in_v2i64_param_2];
+; CHECK-NEXT:    and.b64 %rd9, %rd6, %rd7;
+; CHECK-NEXT:    and.b64 %rd10, %rd5, %rd8;
+; CHECK-NEXT:    xor.b64 %rd11, %rd10, %rd4;
+; CHECK-NEXT:    xor.b64 %rd12, %rd9, %rd3;
+; CHECK-NEXT:    st.param.v2.b64 [func_retval0+0], {%rd12, %rd11};
+; CHECK-NEXT:    ret;
+; "in" masked-merge pattern: ((x ^ y) & mask) ^ y over <2 x i64> lanes.
+  %n0 = xor <2 x i64> %x, %y
+  %n1 = and <2 x i64> %n0, %mask
+  %r = xor <2 x i64> %n1, %y
+  ret <2 x i64> %r
+}


        


More information about the llvm-commits mailing list