[llvm] r273458 - [AArch64] Remove an overly aggressive assert.

Chad Rosier via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 22 12:18:53 PDT 2016


Author: mcrosier
Date: Wed Jun 22 14:18:52 2016
New Revision: 273458

URL: http://llvm.org/viewvc/llvm-project?rev=273458&view=rev
Log:
[AArch64] Remove an overly aggressive assert.

Modified:
    llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
    llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll

Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp?rev=273458&r1=273457&r2=273458&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp Wed Jun 22 14:18:52 2016
@@ -2256,11 +2256,6 @@ static bool tryBitfieldInsertOpFromOr(SD
       APInt(BitWidth, Mask0Imm) == ~APInt(BitWidth, Mask1Imm) &&
       (isShiftedMask(Mask0Imm, VT) || isShiftedMask(Mask1Imm, VT))) {
 
-    // We should have already caught the case where we extract hi and low parts.
-    // E.g. BFXIL from 'or (and X, 0xffff0000), (and Y, 0x0000ffff)'.
-    assert(!(isShiftedMask(Mask0Imm, VT) && isShiftedMask(Mask1Imm, VT)) &&
-           "BFXIL should have already been optimized.");
-
     // ORR is commutative, so canonicalize to the form 'or (and X, Mask0Imm),
     // (and Y, Mask1Imm)' where Mask1Imm is the shifted mask masking off the
     // bits to be inserted.

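For context: the assert removed above claimed that whenever both Mask0Imm
and Mask1Imm are shifted masks (a single contiguous run of set bits), the
BFXIL pattern must already have been matched. The new test below disproves
that: a trunc between the shift and the masks blocks the BFXIL match, yet
both masks are still shifted masks. A minimal standalone sketch of the
shifted-mask property (a hypothetical re-implementation for illustration,
not the actual isShiftedMask helper in AArch64ISelDAGToDAG.cpp):

  #include <cassert>
  #include <cstdint>

  // Hypothetical stand-in for isShiftedMask: true iff Mask is a single
  // contiguous run of set bits within BitWidth.
  static bool isShiftedMask(uint64_t Mask, unsigned BitWidth) {
    Mask &= (BitWidth == 64) ? ~0ULL : ((1ULL << BitWidth) - 1);
    if (Mask == 0)
      return false;
    // Shift out trailing zeros; a contiguous run then has the form 2^k - 1.
    uint64_t Run = Mask >> __builtin_ctzll(Mask); // GCC/Clang builtin
    return (Run & (Run + 1)) == 0;
  }

  int main() {
    // The two masks from the new test: complementary, and *both* shifted
    // masks, i.e. exactly the situation the removed assert ruled out.
    uint32_t Mask0 = 0x007fffffu; //  8388607, bits [22:0]
    uint32_t Mask1 = 0xff800000u; // -8388608, bits [31:23]
    assert(isShiftedMask(Mask0, 32) && isShiftedMask(Mask1, 32));
    assert((Mask0 ^ Mask1) == 0xffffffffu); // complementary
    return 0;
  }

Because the BFXIL matcher cannot see through the trunc, selection reaches
this point with both checks true, so the assert fired; with it removed, the
code below selects a BFI, as the new test's CHECK lines show.
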
Modified: llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll?rev=273458&r1=273457&r2=273458&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll Wed Jun 22 14:18:52 2016
@@ -453,7 +453,7 @@ define i32 @test7(i32 %a) {
 ; to the original ORR are not okay.  In this case we would be replacing the
 ; 'and' with a 'movk', which would decrease ILP while using the same number of
 ; instructions.
-; CHECK: @test8
+; CHECK-LABEL: @test8
 ; CHECK: mov [[REG2:x[0-9]+]], #157599529959424
 ; CHECK: and [[REG1:x[0-9]+]], x0, #0xff000000000000ff
 ; CHECK: movk [[REG2]], #31059, lsl #16
@@ -463,3 +463,20 @@ define i64 @test8(i64 %a) {
   %2 = or i64 %1, 157601565442048     ; 0x00008f5679530000
   ret i64 %2
 }
+
+; This test exposed an issue with an overly aggressive assert.  The code that
+; is expected to catch this case cannot handle the trunc, which results in a
+; failing check due to a mismatch between the BFI opcode and the expected
+; value type of the OR.
+; CHECK-LABEL: @test9
+; CHECK: lsr x0, x0, #12
+; CHECK: lsr [[REG:w[0-9]+]], w1, #23
+; CHECK: bfi w0, [[REG]], #23, #9
+define i32 @test9(i64 %b, i32 %e) {
+  %c = lshr i64 %b, 12
+  %d = trunc i64 %c to i32
+  %f = and i32 %d, 8388607
+  %g = and i32 %e, -8388608
+  %h = or i32 %g, %f
+  ret i32 %h
+}
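
For reference, the IR in @test9 corresponds to source along these lines (a
hypothetical C++ equivalent, not part of the commit):

  #include <cstdint>

  // Hypothetical source for @test9: shift the wide value, truncate, then
  // merge the low 23 bits of the result with the high 9 bits of 'e'.
  uint32_t test9(uint64_t b, uint32_t e) {
    uint32_t d = static_cast<uint32_t>(b >> 12); // lshr + trunc
    uint32_t f = d & 0x007fffffu;                // and i32 %d, 8388607
    uint32_t g = e & 0xff800000u;                // and i32 %e, -8388608
    return g | f;                                // or -> selected as BFI
  }

The trunc between the lshr and the masking is what keeps the earlier BFXIL
match from firing, so the pattern must survive to the BFI path above.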
