[llvm] r342880 - [NFC][CodeGen][X86][AArch64] More tests for 'bit field extract' w/ constants

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 24 06:24:20 PDT 2018


Author: lebedevri
Date: Mon Sep 24 06:24:20 2018
New Revision: 342880

URL: http://llvm.org/viewvc/llvm-project?rev=342880&view=rev
Log:
[NFC][CodeGen][X86][AArch64] More tests for 'bit field extract' w/ constants

It would be best to introduce ISD::BitFieldExtract,
because clearly more than one backend faces the same problem.
But for now let's solve this in the x86-specific DAG combine.

https://bugs.llvm.org/show_bug.cgi?id=38938

Modified:
    llvm/trunk/test/CodeGen/AArch64/extract-bits.ll
    llvm/trunk/test/CodeGen/X86/extract-bits.ll

Modified: llvm/trunk/test/CodeGen/AArch64/extract-bits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/extract-bits.ll?rev=342880&r1=342879&r2=342880&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/extract-bits.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/extract-bits.ll Mon Sep 24 06:24:20 2018
@@ -715,3 +715,76 @@ define i64 @bextr64_d3_load_indexzext(i6
   %masked = lshr i64 %highbitscleared, %sh_prom
   ret i64 %masked
 }
+
+; ---------------------------------------------------------------------------- ;
+; Constant
+; ---------------------------------------------------------------------------- ;
+
+; https://bugs.llvm.org/show_bug.cgi?id=38938
+define void @pr38938(i32* %a0, i64* %a1) {
+; CHECK-LABEL: pr38938:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x1]
+; CHECK-NEXT:    ubfx x8, x8, #21, #10
+; CHECK-NEXT:    lsl x8, x8, #2
+; CHECK-NEXT:    ldr w9, [x0, x8]
+; CHECK-NEXT:    add w9, w9, #1 // =1
+; CHECK-NEXT:    str w9, [x0, x8]
+; CHECK-NEXT:    ret
+  %tmp = load i64, i64* %a1, align 8
+  %tmp1 = lshr i64 %tmp, 21
+  %tmp2 = and i64 %tmp1, 1023
+  %tmp3 = getelementptr inbounds i32, i32* %a0, i64 %tmp2
+  %tmp4 = load i32, i32* %tmp3, align 4
+  %tmp5 = add nsw i32 %tmp4, 1
+  store i32 %tmp5, i32* %tmp3, align 4
+  ret void
+}
+
+; The most canonical variant
+define i32 @c0_i32(i32 %arg) {
+; CHECK-LABEL: c0_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ubfx w0, w0, #19, #10
+; CHECK-NEXT:    ret
+  %tmp0 = lshr i32 %arg, 19
+  %tmp1 = and i32 %tmp0, 1023
+  ret i32 %tmp1
+}
+
+; Should still be fine, but the mask is shifted
+define i32 @c1_i32(i32 %arg) {
+; CHECK-LABEL: c1_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    lsr w8, w0, #19
+; CHECK-NEXT:    and w0, w8, #0xffc
+; CHECK-NEXT:    ret
+  %tmp0 = lshr i32 %arg, 19
+  %tmp1 = and i32 %tmp0, 4092
+  ret i32 %tmp1
+}
+
+; Should be still fine, but the result is shifted left afterwards
+define i32 @c2_i32(i32 %arg) {
+; CHECK-LABEL: c2_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ubfx w8, w0, #19, #10
+; CHECK-NEXT:    lsl w0, w8, #2
+; CHECK-NEXT:    ret
+  %tmp0 = lshr i32 %arg, 19
+  %tmp1 = and i32 %tmp0, 1023
+  %tmp2 = shl i32 %tmp1, 2
+  ret i32 %tmp2
+}
+
+; The mask covers a newly shifted-in bit
+define i32 @c4_i32_bad(i32 %arg) {
+; CHECK-LABEL: c4_i32_bad:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    lsr w8, w0, #19
+; CHECK-NEXT:    and w0, w8, #0x1ffe
+; CHECK-NEXT:    ret
+  %tmp0 = lshr i32 %arg, 19
+  %tmp1 = and i32 %tmp0, 16382
+  ret i32 %tmp1
+}

Modified: llvm/trunk/test/CodeGen/X86/extract-bits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extract-bits.ll?rev=342880&r1=342879&r2=342880&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/extract-bits.ll (original)
+++ llvm/trunk/test/CodeGen/X86/extract-bits.ll Mon Sep 24 06:24:20 2018
@@ -5613,6 +5613,7 @@ define i64 @bextr64_d5_skipextrauses(i64
 ; Constant
 ; ---------------------------------------------------------------------------- ;
 
+; https://bugs.llvm.org/show_bug.cgi?id=38938
 define void @pr38938(i32* %a0, i64* %a1) {
 ; X86-LABEL: pr38938:
 ; X86:       # %bb.0:
@@ -5640,3 +5641,118 @@ define void @pr38938(i32* %a0, i64* %a1)
   store i32 %tmp5, i32* %tmp3, align 4
   ret void
 }
+
+; The most canonical variant
+define i32 @c0_i32(i32 %arg) {
+; X86-NOBMI-LABEL: c0_i32:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    shrl $19, %eax
+; X86-NOBMI-NEXT:    andl $1023, %eax # imm = 0x3FF
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1NOTBM-LABEL: c0_i32:
+; X86-BMI1NOTBM:       # %bb.0:
+; X86-BMI1NOTBM-NEXT:    movl $2579, %eax # imm = 0xA13
+; X86-BMI1NOTBM-NEXT:    bextrl %eax, {{[0-9]+}}(%esp), %eax
+; X86-BMI1NOTBM-NEXT:    retl
+;
+; X86-BMI1TBM-LABEL: c0_i32:
+; X86-BMI1TBM:       # %bb.0:
+; X86-BMI1TBM-NEXT:    bextrl $2579, {{[0-9]+}}(%esp), %eax # imm = 0xA13
+; X86-BMI1TBM-NEXT:    retl
+;
+; X86-BMI1NOTBMBMI2-LABEL: c0_i32:
+; X86-BMI1NOTBMBMI2:       # %bb.0:
+; X86-BMI1NOTBMBMI2-NEXT:    movl $2579, %eax # imm = 0xA13
+; X86-BMI1NOTBMBMI2-NEXT:    bextrl %eax, {{[0-9]+}}(%esp), %eax
+; X86-BMI1NOTBMBMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: c0_i32:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl %edi, %eax
+; X64-NOBMI-NEXT:    shrl $19, %eax
+; X64-NOBMI-NEXT:    andl $1023, %eax # imm = 0x3FF
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1NOTBM-LABEL: c0_i32:
+; X64-BMI1NOTBM:       # %bb.0:
+; X64-BMI1NOTBM-NEXT:    movl $2579, %eax # imm = 0xA13
+; X64-BMI1NOTBM-NEXT:    bextrl %eax, %edi, %eax
+; X64-BMI1NOTBM-NEXT:    retq
+;
+; X64-BMI1TBM-LABEL: c0_i32:
+; X64-BMI1TBM:       # %bb.0:
+; X64-BMI1TBM-NEXT:    bextrl $2579, %edi, %eax # imm = 0xA13
+; X64-BMI1TBM-NEXT:    retq
+;
+; X64-BMI1NOTBMBMI2-LABEL: c0_i32:
+; X64-BMI1NOTBMBMI2:       # %bb.0:
+; X64-BMI1NOTBMBMI2-NEXT:    movl $2579, %eax # imm = 0xA13
+; X64-BMI1NOTBMBMI2-NEXT:    bextrl %eax, %edi, %eax
+; X64-BMI1NOTBMBMI2-NEXT:    retq
+  %tmp0 = lshr i32 %arg, 19
+  %tmp1 = and i32 %tmp0, 1023
+  ret i32 %tmp1
+}
+
+; Should still be fine, but the mask is shifted
+define i32 @c1_i32(i32 %arg) {
+; X86-LABEL: c1_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shrl $19, %eax
+; X86-NEXT:    andl $4092, %eax # imm = 0xFFC
+; X86-NEXT:    retl
+;
+; X64-LABEL: c1_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    shrl $19, %eax
+; X64-NEXT:    andl $4092, %eax # imm = 0xFFC
+; X64-NEXT:    retq
+  %tmp0 = lshr i32 %arg, 19
+  %tmp1 = and i32 %tmp0, 4092
+  ret i32 %tmp1
+}
+
+; Should be still fine, but the result is shifted left afterwards
+define i32 @c2_i32(i32 %arg) {
+; X86-LABEL: c2_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shrl $17, %eax
+; X86-NEXT:    andl $4092, %eax # imm = 0xFFC
+; X86-NEXT:    retl
+;
+; X64-LABEL: c2_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    shrl $17, %eax
+; X64-NEXT:    andl $4092, %eax # imm = 0xFFC
+; X64-NEXT:    retq
+  %tmp0 = lshr i32 %arg, 19
+  %tmp1 = and i32 %tmp0, 1023
+  %tmp2 = shl i32 %tmp1, 2
+  ret i32 %tmp2
+}
+
+; The mask covers a newly shifted-in bit
+define i32 @c4_i32_bad(i32 %arg) {
+; X86-LABEL: c4_i32_bad:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shrl $19, %eax
+; X86-NEXT:    andl $-2, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: c4_i32_bad:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    shrl $19, %eax
+; X64-NEXT:    andl $-2, %eax
+; X64-NEXT:    retq
+  %tmp0 = lshr i32 %arg, 19
+  %tmp1 = and i32 %tmp0, 16382
+  ret i32 %tmp1
+}




More information about the llvm-commits mailing list