[llvm] 18308e1 - AArch64 GISel: legalize lshr operands, even if the shift is poison

Jameson Nash via llvm-commits <llvm-commits at lists.llvm.org>
Tue Nov 30 12:29:58 PST 2021


Author: Jameson Nash
Date: 2021-11-30T15:28:35-05:00
New Revision: 18308e171b5b1dd99627a4d88c7d6c5ff21b8c96

URL: https://github.com/llvm/llvm-project/commit/18308e171b5b1dd99627a4d88c7d6c5ff21b8c96
DIFF: https://github.com/llvm/llvm-project/commit/18308e171b5b1dd99627a4d88c7d6c5ff21b8c96.diff

LOG: AArch64 GISel: legalize lshr operands, even if the shift is poison

Previously, such a shift caused GlobalISel to emit invalid MIR (a
gpr32-to-gpr64 copy) and fail during machine verification.
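
For context, the failure mode reduces to a plain COPY between register
classes of different sizes, which the machine verifier rejects. A
hypothetical minimal shape (not the literal selector output):

    %3:gpr64 = COPY %2:gpr32    ; illegal: copy with mismatching sizes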

While this shift is undefined (it returns poison), it should not crash
codegen: it is legal IR input as long as the value is unused, and it may
appear inside dead code (for example, as the unselected operand of a
select instruction).
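
A minimal sketch of the kind of legalized GMIR that must survive
selection, mirroring the lshr_cimm_32_64 test added below (the i64 -8
amount is out of range for a 32-bit shift, so the result is poison):

    %0:gpr(s32) = COPY $w0
    %1:gpr(s64) = G_CONSTANT i64 -8     ; amount >= bit width: poison
    %2:gpr(s32) = G_LSHR %0, %1(s64)    ; must still select, not crash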

Discovered while trying to build Julia with LLVM v13:
https://github.com/JuliaLang/julia/pull/42602.

Reviewed By: aemerson

Differential Revision: https://reviews.llvm.org/D114389

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/select-binop.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-scalar-shift-imm.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 34c990b96ea2e..563f2b7a07351 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -1920,35 +1920,6 @@ bool AArch64InstructionSelector::preISelLower(MachineInstr &I) {
   MachineRegisterInfo &MRI = MF.getRegInfo();
 
   switch (I.getOpcode()) {
-  case TargetOpcode::G_SHL:
-  case TargetOpcode::G_ASHR:
-  case TargetOpcode::G_LSHR: {
-    // These shifts are legalized to have 64 bit shift amounts because we want
-    // to take advantage of the existing imported selection patterns that assume
-    // the immediates are s64s. However, if the shifted type is 32 bits and for
-    // some reason we receive input GMIR that has an s64 shift amount that's not
-    // a G_CONSTANT, insert a truncate so that we can still select the s32
-    // register-register variant.
-    Register SrcReg = I.getOperand(1).getReg();
-    Register ShiftReg = I.getOperand(2).getReg();
-    const LLT ShiftTy = MRI.getType(ShiftReg);
-    const LLT SrcTy = MRI.getType(SrcReg);
-    if (SrcTy.isVector())
-      return false;
-    assert(!ShiftTy.isVector() && "unexpected vector shift ty");
-    if (SrcTy.getSizeInBits() != 32 || ShiftTy.getSizeInBits() != 64)
-      return false;
-    auto *AmtMI = MRI.getVRegDef(ShiftReg);
-    assert(AmtMI && "could not find a vreg definition for shift amount");
-    if (AmtMI->getOpcode() != TargetOpcode::G_CONSTANT) {
-      // Insert a subregister copy to implement a 64->32 trunc
-      auto Trunc = MIB.buildInstr(TargetOpcode::COPY, {SrcTy}, {})
-                       .addReg(ShiftReg, 0, AArch64::sub_32);
-      MRI.setRegBank(Trunc.getReg(0), RBI.getRegBank(AArch64::GPRRegBankID));
-      I.getOperand(2).setReg(Trunc.getReg(0));
-    }
-    return true;
-  }
   case TargetOpcode::G_STORE: {
     bool Changed = contractCrossBankCopyIntoStore(I, MRI);
     MachineOperand &SrcOp = I.getOperand(0);
@@ -2950,6 +2921,28 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
     if (Opcode == TargetOpcode::G_SHL &&
         MRI.getType(I.getOperand(0).getReg()).isVector())
       return selectVectorSHL(I, MRI);
+
+    // These shifts were legalized to have 64 bit shift amounts because we
+    // want to take advantage of the selection patterns that assume the
+    // immediates are s64s, however, selectBinaryOp will assume both operands
+    // will have the same bit size.
+    {
+      Register SrcReg = I.getOperand(1).getReg();
+      Register ShiftReg = I.getOperand(2).getReg();
+      const LLT ShiftTy = MRI.getType(ShiftReg);
+      const LLT SrcTy = MRI.getType(SrcReg);
+      if (!SrcTy.isVector() && SrcTy.getSizeInBits() == 32 &&
+          ShiftTy.getSizeInBits() == 64) {
+        assert(!ShiftTy.isVector() && "unexpected vector shift ty");
+        auto *AmtMI = MRI.getVRegDef(ShiftReg);
+        assert(AmtMI && "could not find a vreg definition for shift amount");
+        // Insert a subregister copy to implement a 64->32 trunc
+        auto Trunc = MIB.buildInstr(TargetOpcode::COPY, {SrcTy}, {})
+                         .addReg(ShiftReg, 0, AArch64::sub_32);
+        MRI.setRegBank(Trunc.getReg(0), RBI.getRegBank(AArch64::GPRRegBankID));
+        I.getOperand(2).setReg(Trunc.getReg(0));
+      }
+    }
     LLVM_FALLTHROUGH;
   case TargetOpcode::G_FADD:
   case TargetOpcode::G_FSUB:

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-binop.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-binop.mir
index da05ea0cea5a1..e56eb581ac601 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-binop.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-binop.mir
@@ -491,6 +491,32 @@ body:             |
     $w0 = COPY %2(s32)
 ...
 
+---
+name:            shl_s32_64_gpr
+legalized:       true
+regBankSelected: true
+
+registers:
+  - { id: 0, class: gpr }
+  - { id: 1, class: gpr }
+  - { id: 2, class: gpr }
+
+body:             |
+  bb.0:
+    liveins: $w0, $x1
+
+    ; CHECK-LABEL: name: shl_s32_64_gpr
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64all = COPY $x1
+    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[COPY1]].sub_32
+    ; CHECK: [[LSLVWr:%[0-9]+]]:gpr32 = LSLVWr [[COPY]], [[COPY2]]
+    ; CHECK: $w0 = COPY [[LSLVWr]]
+    %0(s32) = COPY $w0
+    %1(s64) = COPY $x1
+    %2(s32) = G_SHL %0, %1
+    $w0 = COPY %2(s32)
+...
+
 ---
 # Same as add_s64_gpr, for G_SHL operations.
 name:            shl_s64_gpr

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-scalar-shift-imm.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-scalar-shift-imm.mir
index 76c3fd5bfdadf..f6a7f36fe704e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-scalar-shift-imm.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-scalar-shift-imm.mir
@@ -99,6 +99,50 @@ body:             |
     $w0 = COPY %2(s32)
     RET_ReallyLR implicit $w0
 
+...
+---
+name:            ashr_cimm_32_64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    liveins: $w0
+
+    ; CHECK-LABEL: name: ashr_cimm_32_64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm -8
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[MOVi64imm]]
+    ; CHECK: [[ASRVWr:%[0-9]+]]:gpr32 = ASRVWr [[COPY]], [[COPY1]]
+    ; CHECK: $w0 = COPY [[ASRVWr]]
+    ; CHECK: RET_ReallyLR implicit $w0
+    %0:gpr(s32) = COPY $w0
+    %3:gpr(s64) = G_CONSTANT i64 -8
+    %2:gpr(s32) = G_ASHR %0, %3(s64)
+    $w0 = COPY %2(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            lshr_cimm_32_64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    liveins: $w0
+
+    ; CHECK-LABEL: name: lshr_cimm_32_64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm -8
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[MOVi64imm]]
+    ; CHECK: [[LSRVWr:%[0-9]+]]:gpr32 = LSRVWr [[COPY]], [[COPY1]]
+    ; CHECK: $w0 = COPY [[LSRVWr]]
+    ; CHECK: RET_ReallyLR implicit $w0
+    %0:gpr(s32) = COPY $w0
+    %3:gpr(s64) = G_CONSTANT i64 -8
+    %2:gpr(s32) = G_LSHR %0, %3(s64)
+    $w0 = COPY %2(s32)
+    RET_ReallyLR implicit $w0
+
 ...
 ---
 name:            ashr_cimm_64

