[llvm] c9737b6 - [X86] Add regression test case from rG057db2002bb3

Simon Pilgrim via llvm-commits llvm-commits@lists.llvm.org
Fri Jul 29 06:20:47 PDT 2022


Author: Simon Pilgrim
Date: 2022-07-29T14:20:35+01:00
New Revision: c9737b6f1818056de3a69e43150d501ac0ee2851

URL: https://github.com/llvm/llvm-project/commit/c9737b6f1818056de3a69e43150d501ac0ee2851
DIFF: https://github.com/llvm/llvm-project/commit/c9737b6f1818056de3a69e43150d501ac0ee2851.diff

LOG: [X86] Add regression test case from rG057db2002bb3

When constant folding "ANDNP(C,X) -> AND(~C,X)", we hit cases such as this one, where we interfered with the "OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))" fold in canonicalizeBitSelect.
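
For reference, the bit-select pattern that canonicalizeBitSelect recognises looks like the sketch below. This is an illustrative example written for this note, not code from the commit; the function name and the mask value 255 are arbitrary. For a constant mask C, the OR keeps bits of %x where C is set and bits of %y where it is clear, which the X86 backend can lower to PAND/PANDN/POR (or VPTERNLOG). Rewriting the ANDNP back into AND(~C,X) removes the ANDNP node that this fold wants to reuse.

; Illustrative sketch only (not part of this commit); mask 255 is arbitrary.
define <2 x i64> @bitselect_sketch(<2 x i64> %x, <2 x i64> %y) {
  %xc  = and <2 x i64> %x, <i64 255, i64 255>    ; X & C
  %ync = and <2 x i64> %y, <i64 -256, i64 -256>  ; Y & ~C
  %sel = or <2 x i64> %xc, %ync                  ; select bits of X where C is set, else Y
  ret <2 x i64> %sel
}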

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/combine-bitselect.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/combine-bitselect.ll b/llvm/test/CodeGen/X86/combine-bitselect.ll
index a3e3f9b6ce381..43fd9243ea579 100644
--- a/llvm/test/CodeGen/X86/combine-bitselect.ll
+++ b/llvm/test/CodeGen/X86/combine-bitselect.ll
@@ -1061,3 +1061,133 @@ bb:
   ret <4 x i1> %tmp4
 }
 
+; Regression reported on 057db2002bb3d79429db3c5fe436c8cefc50cb25
+@d = external global <2 x i64>, align 16
+define void @constantfold_andn_mask() nounwind {
+; SSE-LABEL: constantfold_andn_mask:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    pushq %rax
+; SSE-NEXT:    callq use@PLT
+; SSE-NEXT:    movdqu (%rax), %xmm1
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [31,248,31,248,31,248,31,248,31,248,31,248,31,248,31,248]
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    pavgb %xmm2, %xmm0
+; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    pandn %xmm0, %xmm2
+; SSE-NEXT:    por %xmm1, %xmm2
+; SSE-NEXT:    movabsq $87960930222080, %rax # imm = 0x500000000000
+; SSE-NEXT:    xorq d@GOTPCREL(%rip), %rax
+; SSE-NEXT:    movdqa %xmm2, (%rax)
+; SSE-NEXT:    popq %rax
+; SSE-NEXT:    retq
+;
+; XOP-LABEL: constantfold_andn_mask:
+; XOP:       # %bb.0: # %entry
+; XOP-NEXT:    pushq %rax
+; XOP-NEXT:    callq use@PLT
+; XOP-NEXT:    vmovdqu (%rax), %xmm1
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm2 = [31,248,31,248,31,248,31,248,31,248,31,248,31,248,31,248]
+; XOP-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; XOP-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; XOP-NEXT:    vpavgb %xmm2, %xmm0, %xmm0
+; XOP-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    vpandn %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; XOP-NEXT:    movabsq $87960930222080, %rax # imm = 0x500000000000
+; XOP-NEXT:    xorq d@GOTPCREL(%rip), %rax
+; XOP-NEXT:    vmovdqa %xmm0, (%rax)
+; XOP-NEXT:    popq %rax
+; XOP-NEXT:    retq
+;
+; AVX1-LABEL: constantfold_andn_mask:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    pushq %rax
+; AVX1-NEXT:    callq use@PLT
+; AVX1-NEXT:    vmovdqu (%rax), %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [31,248,31,248,31,248,31,248,31,248,31,248,31,248,31,248]
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpavgb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpandn %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    movabsq $87960930222080, %rax # imm = 0x500000000000
+; AVX1-NEXT:    xorq d@GOTPCREL(%rip), %rax
+; AVX1-NEXT:    vmovdqa %xmm0, (%rax)
+; AVX1-NEXT:    popq %rax
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: constantfold_andn_mask:
+; AVX2:       # %bb.0: # %entry
+; AVX2-NEXT:    pushq %rax
+; AVX2-NEXT:    callq use@PLT
+; AVX2-NEXT:    vmovdqu (%rax), %xmm1
+; AVX2-NEXT:    vpbroadcastw {{.*#+}} xmm2 = [63519,63519,63519,63519,63519,63519,63519,63519]
+; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm3
+; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpavgb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpandn %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX2-NEXT:    movabsq $87960930222080, %rax # imm = 0x500000000000
+; AVX2-NEXT:    xorq d@GOTPCREL(%rip), %rax
+; AVX2-NEXT:    vmovdqa %xmm0, (%rax)
+; AVX2-NEXT:    popq %rax
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: constantfold_andn_mask:
+; AVX512F:       # %bb.0: # %entry
+; AVX512F-NEXT:    pushq %rax
+; AVX512F-NEXT:    callq use@PLT
+; AVX512F-NEXT:    vmovdqu (%rax), %xmm1
+; AVX512F-NEXT:    vpbroadcastw {{.*#+}} xmm2 = [63519,63519,63519,63519,63519,63519,63519,63519]
+; AVX512F-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT:    vpavgb %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vpternlogq $184, %zmm1, %zmm2, %zmm0
+; AVX512F-NEXT:    movabsq $87960930222080, %rax # imm = 0x500000000000
+; AVX512F-NEXT:    xorq d@GOTPCREL(%rip), %rax
+; AVX512F-NEXT:    vmovdqa %xmm0, (%rax)
+; AVX512F-NEXT:    popq %rax
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: constantfold_andn_mask:
+; AVX512VL:       # %bb.0: # %entry
+; AVX512VL-NEXT:    pushq %rax
+; AVX512VL-NEXT:    callq use@PLT
+; AVX512VL-NEXT:    vmovdqu (%rax), %xmm1
+; AVX512VL-NEXT:    vpbroadcastw {{.*#+}} xmm2 = [63519,63519,63519,63519,63519,63519,63519,63519]
+; AVX512VL-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpavgb %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpternlogq $216, %xmm2, %xmm1, %xmm0
+; AVX512VL-NEXT:    movabsq $87960930222080, %rax # imm = 0x500000000000
+; AVX512VL-NEXT:    xorq d@GOTPCREL(%rip), %rax
+; AVX512VL-NEXT:    vmovdqa %xmm0, (%rax)
+; AVX512VL-NEXT:    popq %rax
+; AVX512VL-NEXT:    retq
+entry:
+  %call = call noundef <2 x i64> @use()
+  %_msret = load <2 x i64>, ptr undef, align 8
+  %i = bitcast <2 x i64> %_msret to <16 x i8>
+  %i1 = bitcast <2 x i64> %call to <16 x i8>
+  %i2 = and <16 x i8> %i, <i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8>
+  %i3 = and <16 x i8> %i1, <i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8>
+  %i4 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> <i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8, i8 31, i8 -8>, <16 x i8> %i3)
+  %i5 = bitcast <16 x i8> %i2 to <2 x i64>
+  %i6 = bitcast <16 x i8> %i4 to <2 x i64>
+  %i7 = and <2 x i64> %_msret, <i64 567462211834873824, i64 567462211834873824>
+  %i8 = xor <2 x i64> zeroinitializer, <i64 -1, i64 -1>
+  %i9 = xor <2 x i64> %i6, <i64 -1, i64 -1>
+  %i10 = and <2 x i64> %i8, %i5
+  %i11 = and <2 x i64> %i7, %i9
+  %i12 = or <2 x i64> zeroinitializer, %i10
+  %i13 = or <2 x i64> %i12, %i11
+  store <2 x i64> %i13, ptr inttoptr (i64 xor (i64 ptrtoint (ptr @d to i64), i64 87960930222080) to ptr), align 16
+  ret void
+}
+
+declare <2 x i64> @use()
+declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>)



