[llvm] 0b91b1a - [AArch64][GlobalISel] Legalize and Lower Funnel Shift for GlobalISel

Tuan Chuong Goh via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 22 02:33:24 PDT 2023


Author: Tuan Chuong Goh
Date: 2023-08-22T10:32:50+01:00
New Revision: 0b91b1aec4809b97a19b1f9e8585927c978dcda0

URL: https://github.com/llvm/llvm-project/commit/0b91b1aec4809b97a19b1f9e8585927c978dcda0
DIFF: https://github.com/llvm/llvm-project/commit/0b91b1aec4809b97a19b1f9e8585927c978dcda0.diff

LOG: [AArch64][GlobalISel] Legalize and Lower Funnel Shift for GlobalISel

Recognise G_FSHR with a constant shift amount as a legal instruction, and
lower G_FSHL with a constant shift amount to G_FSHR. If the shift amount is
not a constant, the generic lowering is applied to the instruction.

Differential Revision: https://reviews.llvm.org/D155484
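
As an illustrative aside (not part of the patch itself), the conversion relies
on the identity fshl(x, y, k) == fshr(x, y, BW - k) whenever k is not a
multiple of the bit width BW. A minimal C++ sketch of that identity, using
hypothetical helpers fshl32/fshr32 that model the 32-bit intrinsics:

  #include <cassert>
  #include <cstdint>

  // Reference semantics of the 32-bit funnel shifts; the shift amount is
  // taken modulo the bit width, as llvm.fshl/llvm.fshr do.
  static uint32_t fshl32(uint32_t X, uint32_t Y, unsigned K) {
    K %= 32;
    return K == 0 ? X : (X << K) | (Y >> (32 - K));
  }
  static uint32_t fshr32(uint32_t X, uint32_t Y, unsigned K) {
    K %= 32;
    return K == 0 ? Y : (X << (32 - K)) | (Y >> K);
  }

  int main() {
    // fshl by the constant 9 equals fshr by 32 - 9 = 23, matching the
    // fshl_i32_const_shift MIR test below, which expects G_FSHR with 23.
    assert(fshl32(0x12345678u, 0x9abcdef0u, 9) ==
           fshr32(0x12345678u, 0x9abcdef0u, 23));
    return 0;
  }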

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
    llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-fshl.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-fshr.mir
    llvm/test/CodeGen/AArch64/funnel-shift.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 0d6cbe7d88311a..869d9a2b532de0 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -898,7 +898,9 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
   getActionDefinitionsBuilder({G_UADDSAT, G_USUBSAT})
       .lowerIf([=](const LegalityQuery &Q) { return Q.Types[0].isScalar(); });
 
-  getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower();
+  getActionDefinitionsBuilder({G_FSHL, G_FSHR})
+      .customFor({{s32, s32}, {s32, s64}, {s64, s64}})
+      .lower();
 
   getActionDefinitionsBuilder(G_ROTR)
       .legalFor({{s32, s64}, {s64, s64}})
@@ -1003,6 +1005,9 @@ bool AArch64LegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
   case TargetOpcode::G_SBFX:
   case TargetOpcode::G_UBFX:
     return legalizeBitfieldExtract(MI, MRI, Helper);
+  case TargetOpcode::G_FSHL:
+  case TargetOpcode::G_FSHR:
+    return legalizeFunnelShift(MI, MRI, MIRBuilder, Observer, Helper);
   case TargetOpcode::G_ROTR:
     return legalizeRotate(MI, MRI, Helper);
   case TargetOpcode::G_CTPOP:
@@ -1023,6 +1028,59 @@ bool AArch64LegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
   llvm_unreachable("expected switch to return");
 }
 
+bool AArch64LegalizerInfo::legalizeFunnelShift(MachineInstr &MI,
+                                               MachineRegisterInfo &MRI,
+                                               MachineIRBuilder &MIRBuilder,
+                                               GISelChangeObserver &Observer,
+                                               LegalizerHelper &Helper) const {
+  assert(MI.getOpcode() == TargetOpcode::G_FSHL ||
+         MI.getOpcode() == TargetOpcode::G_FSHR);
+
+  // Keep as G_FSHR if the shift amount is a G_CONSTANT; otherwise use the
+  // generic lowering
+  Register ShiftNo = MI.getOperand(3).getReg();
+  LLT ShiftTy = MRI.getType(ShiftNo);
+  auto VRegAndVal = getIConstantVRegValWithLookThrough(ShiftNo, MRI);
+
+  // Adjust shift amount according to Opcode (FSHL/FSHR)
+  // Convert FSHL to FSHR
+  LLT OperationTy = MRI.getType(MI.getOperand(0).getReg());
+  APInt BitWidth(ShiftTy.getSizeInBits(), OperationTy.getSizeInBits(), false);
+
+  // Lower non-constant shifts and leave zero shifts to the optimizer.
+  if (!VRegAndVal || VRegAndVal->Value.urem(BitWidth) == 0)
+    return (Helper.lowerFunnelShiftAsShifts(MI) ==
+            LegalizerHelper::LegalizeResult::Legalized);
+
+  APInt Amount = VRegAndVal->Value.urem(BitWidth);
+
+  Amount = MI.getOpcode() == TargetOpcode::G_FSHL ? BitWidth - Amount : Amount;
+
+  // If the instruction is a G_FSHR with a 64-bit G_CONSTANT shift amount in
+  // the range [0, BitWidth), it is already legal
+  if (ShiftTy.getSizeInBits() == 64 && MI.getOpcode() == TargetOpcode::G_FSHR &&
+      VRegAndVal->Value.ult(BitWidth))
+    return true;
+
+  // Materialize the adjusted shift amount as a 64-bit constant
+  auto Cast64 = MIRBuilder.buildConstant(LLT::scalar(64), Amount.zext(64));
+
+  if (MI.getOpcode() == TargetOpcode::G_FSHR) {
+    Observer.changingInstr(MI);
+    MI.getOperand(3).setReg(Cast64.getReg(0));
+    Observer.changedInstr(MI);
+  }
+  // If Opcode is FSHL, remove the FSHL instruction and create a FSHR
+  // instruction
+  else if (MI.getOpcode() == TargetOpcode::G_FSHL) {
+    MIRBuilder.buildInstr(TargetOpcode::G_FSHR, {MI.getOperand(0).getReg()},
+                          {MI.getOperand(1).getReg(), MI.getOperand(2).getReg(),
+                           Cast64.getReg(0)});
+    MI.eraseFromParent();
+  }
+  return true;
+}
+
 bool AArch64LegalizerInfo::legalizeRotate(MachineInstr &MI,
                                           MachineRegisterInfo &MRI,
                                           LegalizerHelper &Helper) const {

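(Editorial note, not part of the patch: the shift-amount adjustment above can
be checked with a small stand-alone sketch. It uses plain unsigned arithmetic
instead of APInt, the helper name adjustForFSHR is hypothetical, and the real
code bails out to the generic lowering before this point when the amount is a
multiple of the bit width.)

  #include <cassert>

  // Reduce the constant amount modulo the bit width, then convert an FSHL
  // amount into the equivalent FSHR amount, as legalizeFunnelShift does.
  static unsigned adjustForFSHR(bool IsFSHL, unsigned Amount,
                                unsigned BitWidth) {
    Amount %= BitWidth;
    return IsFSHL ? BitWidth - Amount : Amount;
  }

  int main() {
    // fshl i32 by 42: 42 % 32 == 10, emitted as G_FSHR by 22
    // (see fshl_i32_const_overshift in legalize-fshl.mir below).
    assert(adjustForFSHR(/*IsFSHL=*/true, 42, 32) == 22);
    // fshr i64 by 72: 72 % 64 == 8, kept as G_FSHR by 8
    // (see fshr_i64_const_overshift in legalize-fshr.mir below).
    assert(adjustForFSHR(/*IsFSHL=*/false, 72, 64) == 8);
    return 0;
  }
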
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h
index c10f6e071ed430..13a68b83c54ee1 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h
@@ -51,6 +51,10 @@ class AArch64LegalizerInfo : public LegalizerInfo {
                                LegalizerHelper &Helper) const;
   bool legalizeRotate(MachineInstr &MI, MachineRegisterInfo &MRI,
                       LegalizerHelper &Helper) const;
+  bool legalizeFunnelShift(MachineInstr &MI, MachineRegisterInfo &MRI,
+                           MachineIRBuilder &MIRBuilder,
+                           GISelChangeObserver &Observer,
+                           LegalizerHelper &Helper) const;
   bool legalizeCTPOP(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LegalizerHelper &Helper) const;
   bool legalizeAtomicCmpxchg128(MachineInstr &MI, MachineRegisterInfo &MRI,

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fshl.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fshl.mir
index f1945cdaf263cc..80617314ce0e54 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fshl.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fshl.mir
@@ -1,66 +1,574 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
 # RUN: llc -O0 -mtriple=arm64-unknown-unknown -global-isel -run-pass=legalizer -global-isel-abort=1 %s -o - | FileCheck %s
+
+---
+name:            fshl_i8
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1, $w2
+
+    ; CHECK-LABEL: name: fshl_i8
+    ; CHECK: liveins: $w0, $w1, $w2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY2]], [[C1]]
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[XOR]], [[COPY3]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C2]]
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[AND2]](s32)
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[C3]](s64)
+    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[AND1]], [[C2]]
+    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C2]]
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND5]], [[AND4]](s32)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[LSHR1]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %3:_(s32) = COPY $w0
+    %0:_(s8) = G_TRUNC %3(s32)
+    %4:_(s32) = COPY $w1
+    %1:_(s8) = G_TRUNC %4(s32)
+    %5:_(s32) = COPY $w2
+    %2:_(s8) = G_TRUNC %5(s32)
+    %6:_(s8) = G_FSHL %0, %1, %2(s8)
+    %7:_(s32) = G_ANYEXT %6(s8)
+    $w0 = COPY %7(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshl_i16
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1, $w2
+
+    ; CHECK-LABEL: name: fshl_i16
+    ; CHECK: liveins: $w0, $w1, $w2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY2]], [[C1]]
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[XOR]], [[COPY3]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C2]]
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[AND2]](s32)
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[C3]](s64)
+    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[AND1]], [[C2]]
+    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C2]]
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND5]], [[AND4]](s32)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[LSHR1]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %3:_(s32) = COPY $w0
+    %0:_(s16) = G_TRUNC %3(s32)
+    %4:_(s32) = COPY $w1
+    %1:_(s16) = G_TRUNC %4(s32)
+    %5:_(s32) = COPY $w2
+    %2:_(s16) = G_TRUNC %5(s32)
+    %6:_(s16) = G_FSHL %0, %1, %2(s16)
+    %7:_(s32) = G_ANYEXT %6(s16)
+    $w0 = COPY %7(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshl_i32
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1, $w2
+
+    ; CHECK-LABEL: name: fshl_i32
+    ; CHECK: liveins: $w0, $w1, $w2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY2]], [[C1]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[XOR]], [[C]]
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[AND]](s32)
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s64)
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[AND1]](s32)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[LSHR1]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %2:_(s32) = COPY $w2
+    %3:_(s32) = G_FSHL %0, %1, %2(s32)
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshl_i64
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x0, $x1, $x2
+
+    ; CHECK-LABEL: name: fshl_i64
+    ; CHECK: liveins: $x0, $x1, $x2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[C]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY2]], [[C1]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[C]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND]](s64)
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY1]], [[C2]](s64)
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[LSHR]], [[AND1]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[LSHR1]]
+    ; CHECK-NEXT: $x0 = COPY [[OR]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %2:_(s64) = COPY $x2
+    %3:_(s64) = G_FSHL %0, %1, %2(s64)
+    $x0 = COPY %3(s64)
+    RET_ReallyLR implicit $x0
+
+...
+
+---
+name:            fshl_i8_const_shift
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: fshl_i8_const_shift
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[LSHR]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %2:_(s32) = COPY $w0
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $w1
+    %1:_(s8) = G_TRUNC %3(s32)
+    %7:_(s8) = G_CONSTANT i8 4
+    %5:_(s8) = G_FSHL %0, %1, %7(s8)
+    %6:_(s32) = G_ANYEXT %5(s8)
+    $w0 = COPY %6(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshl_i8_const_overshift
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: fshl_i8_const_overshift
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[LSHR]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %2:_(s32) = COPY $w0
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $w1
+    %1:_(s8) = G_TRUNC %3(s32)
+    %7:_(s8) = G_CONSTANT i8 10
+    %5:_(s8) = G_FSHL %0, %1, %7(s8)
+    %6:_(s32) = G_ANYEXT %5(s8)
+    $w0 = COPY %6(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
 ---
-name:            test_s32
+name:            fshl_i8_shift_by_bitwidth
 alignment:       4
 tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $w0, $w1
 
-    ; CHECK-LABEL: name: test_s32
+    ; CHECK-LABEL: name: fshl_i8_shift_by_bitwidth
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY1]], [[C1]]
-    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[XOR]], [[C]]
-    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[AND]](s32)
-    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s64)
-    ; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[AND1]](s32)
-    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[LSHR1]]
-    ; CHECK: $w0 = COPY [[OR]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s64)
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C1]]
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[C3]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[LSHR1]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %2:_(s32) = COPY $w0
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $w1
+    %1:_(s8) = G_TRUNC %3(s32)
+    %7:_(s8) = G_CONSTANT i8 8
+    %5:_(s8) = G_FSHL %0, %1, %7(s8)
+    %6:_(s32) = G_ANYEXT %5(s8)
+    $w0 = COPY %6(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshl_i16_const_shift
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: fshl_i16_const_shift
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[LSHR]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %2:_(s32) = COPY $w0
+    %0:_(s16) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $w1
+    %1:_(s16) = G_TRUNC %3(s32)
+    %4:_(s16) = G_CONSTANT i16 12
+    %5:_(s16) = G_FSHL %0, %1, %4(s16)
+    %6:_(s32) = G_ANYEXT %5(s16)
+    $w0 = COPY %6(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshl_i16_const_overshift
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: fshl_i16_const_overshift
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[LSHR]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %2:_(s32) = COPY $w0
+    %0:_(s16) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $w1
+    %1:_(s16) = G_TRUNC %3(s32)
+    %4:_(s16) = G_CONSTANT i16 20
+    %5:_(s16) = G_FSHL %0, %1, %4(s16)
+    %6:_(s32) = G_ANYEXT %5(s16)
+    $w0 = COPY %6(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshl_i16_shift_by_bitwidth
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: fshl_i16_shift_by_bitwidth
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s64)
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C1]]
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[C3]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[LSHR1]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %2:_(s32) = COPY $w0
+    %0:_(s16) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $w1
+    %1:_(s16) = G_TRUNC %3(s32)
+    %4:_(s16) = G_CONSTANT i16 16
+    %5:_(s16) = G_FSHL %0, %1, %4(s16)
+    %6:_(s32) = G_ANYEXT %5(s16)
+    $w0 = COPY %6(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshl_i32_const_shift
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: fshl_i32_const_shift
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 23
+    ; CHECK-NEXT: [[FSHR:%[0-9]+]]:_(s32) = G_FSHR [[COPY]], [[COPY1]], [[C]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[FSHR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %1:_(s32) = COPY $w1
-    %2:_(s32) = G_FSHL %0(s32), %0, %1
-    $w0 = COPY %2(s32)
+    %2:_(s32) = G_CONSTANT i32 9
+    %3:_(s32) = G_FSHL %0, %1, %2(s32)
+    $w0 = COPY %3(s32)
     RET_ReallyLR implicit $w0
 
 ...
 
 ---
-name:            test_s64
+name:            fshl_i32_const_overshift
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: fshl_i32_const_overshift
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 22
+    ; CHECK-NEXT: [[FSHR:%[0-9]+]]:_(s32) = G_FSHR [[COPY]], [[COPY1]], [[C]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[FSHR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %4:_(s32) = G_CONSTANT i32 42
+    %3:_(s32) = G_FSHL %0, %1, %4(s32)
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshl_i32_shift_by_bitwidth
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: fshl_i32_shift_by_bitwidth
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s64)
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[C2]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[LSHR1]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %4:_(s32) = G_CONSTANT i32 32
+    %3:_(s32) = G_FSHL %0, %1, %4(s32)
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshl_i64_const_shift
 alignment:       4
 tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $x0, $x1
 
-    ; CHECK-LABEL: name: test_s64
+    ; CHECK-LABEL: name: fshl_i64_const_shift
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
-    ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
-    ; CHECK: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY1]], [[C1]]
-    ; CHECK: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[C]]
-    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND]](s64)
-    ; CHECK: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY]], [[C2]](s64)
-    ; CHECK: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[LSHR]], [[AND1]](s64)
-    ; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[LSHR1]]
-    ; CHECK: $x0 = COPY [[OR]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 23
+    ; CHECK-NEXT: [[FSHR:%[0-9]+]]:_(s64) = G_FSHR [[COPY]], [[COPY1]], [[C]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[FSHR]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
-    %2:_(s64) = G_FSHL %0(s64), %0, %1(s64)
-    $x0 = COPY %2(s64)
+    %4:_(s64) = G_CONSTANT i64 41
+    %3:_(s64) = G_FSHL %0, %1, %4(s64)
+    $x0 = COPY %3(s64)
     RET_ReallyLR implicit $x0
 
 ...
 
+---
+name:            fshl_i64_const_overshift
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x0, $x1
+
+    ; CHECK-LABEL: name: fshl_i64_const_overshift
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+    ; CHECK-NEXT: [[FSHR:%[0-9]+]]:_(s64) = G_FSHR [[COPY]], [[COPY1]], [[C]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[FSHR]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %4:_(s64) = G_CONSTANT i64 72
+    %3:_(s64) = G_FSHL %0, %1, %4(s64)
+    $x0 = COPY %3(s64)
+    RET_ReallyLR implicit $x0
+
+...
+
+---
+name:            fshl_i64_shift_by_bitwidth
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x0, $x1
+
+    ; CHECK-LABEL: name: fshl_i64_shift_by_bitwidth
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C1]](s64)
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY1]], [[C2]](s64)
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[LSHR]], [[C]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[LSHR1]]
+    ; CHECK-NEXT: $x0 = COPY [[OR]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %4:_(s64) = G_CONSTANT i64 64
+    %3:_(s64) = G_FSHL %0, %1, %4(s64)
+    $x0 = COPY %3(s64)
+    RET_ReallyLR implicit $x0
+
+...
+
+
+
+---
+name:            fshl_v4i32_shift_by_bitwidth
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $q0, $q1
+
+    ; CHECK-LABEL: name: fshl_v4i32_shift_by_bitwidth
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
+    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C2]](s32), [[C2]](s32), [[C2]](s32), [[C2]](s32)
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(<4 x s32>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<4 x s32>)
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY1]], [[BUILD_VECTOR2]](<4 x s32>)
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[LSHR]], [[BUILD_VECTOR1]](<4 x s32>)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<4 x s32>) = G_OR [[SHL]], [[LSHR1]]
+    ; CHECK-NEXT: $q0 = COPY [[OR]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = COPY $q1
+    %3:_(s32) = G_CONSTANT i32 32
+    %2:_(<4 x s32>) = G_BUILD_VECTOR %3(s32), %3(s32), %3(s32), %3(s32)
+    %4:_(<4 x s32>) = G_FSHL %0, %1, %2(<4 x s32>)
+    $q0 = COPY %4(<4 x s32>)
+    RET_ReallyLR implicit $q0
+
+...

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fshr.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fshr.mir
index 725461130edc06..81bf4332b456d5 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fshr.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fshr.mir
@@ -1,66 +1,559 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
 # RUN: llc -O0 -mtriple=arm64-unknown-unknown -global-isel -run-pass=legalizer -global-isel-abort=1 %s -o - | FileCheck %s
+
+---
+name:            fshr_i8
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1, $w2
+
+    ; CHECK-LABEL: name: fshr_i8
+    ; CHECK: liveins: $w0, $w1, $w2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY2]], [[C1]]
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[XOR]], [[COPY3]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C2]](s64)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[AND1]], [[C3]]
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SHL]], [[AND2]](s32)
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C3]]
+    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND4]], [[AND3]](s32)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[LSHR]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %3:_(s32) = COPY $w0
+    %0:_(s8) = G_TRUNC %3(s32)
+    %4:_(s32) = COPY $w1
+    %1:_(s8) = G_TRUNC %4(s32)
+    %5:_(s32) = COPY $w2
+    %2:_(s8) = G_TRUNC %5(s32)
+    %6:_(s8) = G_FSHR %0, %1, %2(s8)
+    %7:_(s32) = G_ANYEXT %6(s8)
+    $w0 = COPY %7(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshr_i16
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1, $w2
+
+    ; CHECK-LABEL: name: fshr_i16
+    ; CHECK: liveins: $w0, $w1, $w2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY2]], [[C1]]
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[XOR]], [[COPY3]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C2]](s64)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[AND1]], [[C3]]
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SHL]], [[AND2]](s32)
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C3]]
+    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND4]], [[AND3]](s32)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[LSHR]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %3:_(s32) = COPY $w0
+    %0:_(s16) = G_TRUNC %3(s32)
+    %4:_(s32) = COPY $w1
+    %1:_(s16) = G_TRUNC %4(s32)
+    %5:_(s32) = COPY $w2
+    %2:_(s16) = G_TRUNC %5(s32)
+    %6:_(s16) = G_FSHR %0, %1, %2(s16)
+    %7:_(s32) = G_ANYEXT %6(s16)
+    $w0 = COPY %7(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshr_i32
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1, $w2
+
+    ; CHECK-LABEL: name: fshr_i32
+    ; CHECK: liveins: $w0, $w1, $w2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY2]], [[C1]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[XOR]], [[C]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C2]](s64)
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SHL]], [[AND1]](s32)
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[AND]](s32)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[LSHR]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %2:_(s32) = COPY $w2
+    %3:_(s32) = G_FSHR %0, %1, %2(s32)
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshr_i64
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x0, $x1, $x2
+
+    ; CHECK-LABEL: name: fshr_i64
+    ; CHECK: liveins: $x0, $x1, $x2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[C]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY2]], [[C1]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[C]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C2]](s64)
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[SHL]], [[AND1]](s64)
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY1]], [[AND]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+    ; CHECK-NEXT: $x0 = COPY [[OR]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %2:_(s64) = COPY $x2
+    %3:_(s64) = G_FSHR %0, %1, %2(s64)
+    $x0 = COPY %3(s64)
+    RET_ReallyLR implicit $x0
+
+...
+
+
 ---
-name:            test_s32
+name:            fshr_i8_const_shift
 alignment:       4
 tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $w0, $w1
 
-    ; CHECK-LABEL: name: test_s32
+    ; CHECK-LABEL: name: fshr_i8_const_shift
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY1]], [[C1]]
-    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[XOR]], [[C]]
-    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C2]](s64)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SHL]], [[AND1]](s32)
-    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[AND]](s32)
-    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[LSHR]]
-    ; CHECK: $w0 = COPY [[OR]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[LSHR]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %2:_(s32) = COPY $w0
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $w1
+    %1:_(s8) = G_TRUNC %3(s32)
+    %7:_(s8) = G_CONSTANT i8 7
+    %5:_(s8) = G_FSHR %0, %1, %7(s8)
+    %6:_(s32) = G_ANYEXT %5(s8)
+    $w0 = COPY %6(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshr_i8_const_overshift
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: fshr_i8_const_overshift
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[LSHR]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %2:_(s32) = COPY $w0
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $w1
+    %1:_(s8) = G_TRUNC %3(s32)
+    %7:_(s8) = G_CONSTANT i8 10
+    %5:_(s8) = G_FSHR %0, %1, %7(s8)
+    %6:_(s32) = G_ANYEXT %5(s8)
+    $w0 = COPY %6(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshr_i8_shift_by_bitwidth
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: fshr_i8_shift_by_bitwidth
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SHL]], [[C1]](s64)
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C3]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[LSHR]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %2:_(s32) = COPY $w0
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $w1
+    %1:_(s8) = G_TRUNC %3(s32)
+    %7:_(s8) = G_CONSTANT i8 8
+    %5:_(s8) = G_FSHR %0, %1, %7(s8)
+    %6:_(s32) = G_ANYEXT %5(s8)
+    $w0 = COPY %6(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshr_i16_const_shift
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: fshr_i16_const_shift
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 11
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[LSHR]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %2:_(s32) = COPY $w0
+    %0:_(s16) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $w1
+    %1:_(s16) = G_TRUNC %3(s32)
+    %4:_(s16) = G_CONSTANT i16 5
+    %5:_(s16) = G_FSHR %0, %1, %4(s16)
+    %6:_(s32) = G_ANYEXT %5(s16)
+    $w0 = COPY %6(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshr_i16_const_overshift
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: fshr_i16_const_overshift
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C2]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[LSHR]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %2:_(s32) = COPY $w0
+    %0:_(s16) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $w1
+    %1:_(s16) = G_TRUNC %3(s32)
+    %4:_(s16) = G_CONSTANT i16 20
+    %5:_(s16) = G_FSHR %0, %1, %4(s16)
+    %6:_(s32) = G_ANYEXT %5(s16)
+    $w0 = COPY %6(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshr_i16_shift_by_bitwidth
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: fshr_i16_shift_by_bitwidth
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SHL]], [[C1]](s64)
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C3]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[LSHR]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %2:_(s32) = COPY $w0
+    %0:_(s16) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $w1
+    %1:_(s16) = G_TRUNC %3(s32)
+    %4:_(s16) = G_CONSTANT i16 16
+    %5:_(s16) = G_FSHR %0, %1, %4(s16)
+    %6:_(s32) = G_ANYEXT %5(s16)
+    $w0 = COPY %6(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshr_i32_const_shift
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: fshr_i32_const_shift
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 9
+    ; CHECK-NEXT: [[FSHR:%[0-9]+]]:_(s32) = G_FSHR [[COPY]], [[COPY1]], [[C]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[FSHR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %2:_(s32) = G_CONSTANT i32 9
+    %3:_(s32) = G_FSHR %0, %1, %2(s32)
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshr_i32_const_overshift
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: fshr_i32_const_overshift
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
+    ; CHECK-NEXT: [[FSHR:%[0-9]+]]:_(s32) = G_FSHR [[COPY]], [[COPY1]], [[C]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[FSHR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %1:_(s32) = COPY $w1
-    %2:_(s32) = G_FSHR %0(s32), %0, %1
-    $w0 = COPY %2(s32)
+    %4:_(s32) = G_CONSTANT i32 42
+    %3:_(s32) = G_FSHR %0, %1, %4(s32)
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
+
+...
+
+---
+name:            fshr_i32_shift_by_bitwidth
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: fshr_i32_shift_by_bitwidth
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %1:_(s32) = COPY $w1
+    $w0 = COPY %1(s32)
     RET_ReallyLR implicit $w0
 
 ...
 
 ---
-name:            test_s64
+name:            fshr_i64_const_shift
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x0, $x1
+
+    ; CHECK-LABEL: name: fshr_i64_const_shift
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 41
+    ; CHECK-NEXT: [[FSHR:%[0-9]+]]:_(s64) = G_FSHR [[COPY]], [[COPY1]], [[C]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[FSHR]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %4:_(s64) = G_CONSTANT i64 41
+    %3:_(s64) = G_FSHR %0, %1, %4(s64)
+    $x0 = COPY %3(s64)
+    RET_ReallyLR implicit $x0
+
+...
+
+---
+name:            fshr_i64_const_overshift
 alignment:       4
 tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $x0, $x1
 
-    ; CHECK-LABEL: name: test_s64
+    ; CHECK-LABEL: name: fshr_i64_const_overshift
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
-    ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
-    ; CHECK: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY1]], [[C1]]
-    ; CHECK: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[C]]
-    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C2]](s64)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[SHL]], [[AND1]](s64)
-    ; CHECK: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY]], [[AND]](s64)
-    ; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
-    ; CHECK: $x0 = COPY [[OR]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CHECK-NEXT: [[FSHR:%[0-9]+]]:_(s64) = G_FSHR [[COPY]], [[COPY1]], [[C]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[FSHR]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
-    %2:_(s64) = G_FSHR %0(s64), %0, %1(s64)
-    $x0 = COPY %2(s64)
+    %4:_(s64) = G_CONSTANT i64 72
+    %3:_(s64) = G_FSHR %0, %1, %4(s64)
+    $x0 = COPY %3(s64)
     RET_ReallyLR implicit $x0
 
 ...
 
+---
+name:            fshr_i64_shift_by_bitwidth
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x0, $x1
+
+    ; CHECK-LABEL: name: fshr_i64_shift_by_bitwidth
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C2]](s64)
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[SHL]], [[C]](s64)
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY1]], [[C1]](s64)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+    ; CHECK-NEXT: $x0 = COPY [[OR]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %4:_(s64) = G_CONSTANT i64 64
+    %3:_(s64) = G_FSHR %0, %1, %4(s64)
+    $x0 = COPY %3(s64)
+    RET_ReallyLR implicit $x0
+
+...
+
+---
+name:            fshr_v4i32_shift_by_bitwidth
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $q0, $q1
+
+    ; CHECK-LABEL: name: fshr_v4i32_shift_by_bitwidth
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
+    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C2]](s32), [[C2]](s32), [[C2]](s32), [[C2]](s32)
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(<4 x s32>) = G_SHL [[COPY]], [[BUILD_VECTOR2]](<4 x s32>)
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(<4 x s32>) = G_SHL [[SHL]], [[BUILD_VECTOR1]](<4 x s32>)
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY1]], [[BUILD_VECTOR]](<4 x s32>)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<4 x s32>) = G_OR [[SHL1]], [[LSHR]]
+    ; CHECK-NEXT: $q0 = COPY [[OR]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = COPY $q1
+    %3:_(s32) = G_CONSTANT i32 32
+    %2:_(<4 x s32>) = G_BUILD_VECTOR %3(s32), %3(s32), %3(s32), %3(s32)
+    %4:_(<4 x s32>) = G_FSHR %0, %1, %2(<4 x s32>)
+    $q0 = COPY %4(<4 x s32>)
+    RET_ReallyLR implicit $q0
+
+...

diff --git a/llvm/test/CodeGen/AArch64/funnel-shift.ll b/llvm/test/CodeGen/AArch64/funnel-shift.ll
index 1a2b06e0afb9d3..25861c1c6c2a5d 100644
--- a/llvm/test/CodeGen/AArch64/funnel-shift.ll
+++ b/llvm/test/CodeGen/AArch64/funnel-shift.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64-- -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 declare i8 @llvm.fshl.i8(i8, i8, i8)
 declare i16 @llvm.fshl.i16(i16, i16, i16)
@@ -17,49 +18,110 @@ declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
 ; General case - all operands can be variables.
 
 define i32 @fshl_i32(i32 %x, i32 %y, i32 %z) {
-; CHECK-LABEL: fshl_i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    lsr w8, w1, #1
-; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
-; CHECK-NEXT:    mvn w9, w2
-; CHECK-NEXT:    lsl w10, w0, w2
-; CHECK-NEXT:    lsr w8, w8, w9
-; CHECK-NEXT:    orr w0, w10, w8
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fshl_i32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    lsr w8, w1, #1
+; CHECK-SD-NEXT:    // kill: def $w2 killed $w2 def $x2
+; CHECK-SD-NEXT:    mvn w9, w2
+; CHECK-SD-NEXT:    lsl w10, w0, w2
+; CHECK-SD-NEXT:    lsr w8, w8, w9
+; CHECK-SD-NEXT:    orr w0, w10, w8
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fshl_i32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #31 // =0x1f
+; CHECK-GI-NEXT:    lsr w9, w1, #1
+; CHECK-GI-NEXT:    and w10, w2, #0x1f
+; CHECK-GI-NEXT:    bic w8, w8, w2
+; CHECK-GI-NEXT:    lsl w10, w0, w10
+; CHECK-GI-NEXT:    lsr w8, w9, w8
+; CHECK-GI-NEXT:    orr w0, w10, w8
+; CHECK-GI-NEXT:    ret
   %f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %z)
   ret i32 %f
 }
 
 define i64 @fshl_i64(i64 %x, i64 %y, i64 %z) {
-; CHECK-LABEL: fshl_i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    lsr x8, x1, #1
-; CHECK-NEXT:    mvn w9, w2
-; CHECK-NEXT:    lsl x10, x0, x2
-; CHECK-NEXT:    lsr x8, x8, x9
-; CHECK-NEXT:    orr x0, x10, x8
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fshl_i64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    lsr x8, x1, #1
+; CHECK-SD-NEXT:    mvn w9, w2
+; CHECK-SD-NEXT:    lsl x10, x0, x2
+; CHECK-SD-NEXT:    lsr x8, x8, x9
+; CHECK-SD-NEXT:    orr x0, x10, x8
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fshl_i64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #63 // =0x3f
+; CHECK-GI-NEXT:    lsr x9, x1, #1
+; CHECK-GI-NEXT:    and x10, x2, #0x3f
+; CHECK-GI-NEXT:    bic x8, x8, x2
+; CHECK-GI-NEXT:    lsl x10, x0, x10
+; CHECK-GI-NEXT:    lsr x8, x9, x8
+; CHECK-GI-NEXT:    orr x0, x10, x8
+; CHECK-GI-NEXT:    ret
   %f = call i64 @llvm.fshl.i64(i64 %x, i64 %y, i64 %z)
   ret i64 %f
 }
 
 define i128 @fshl_i128(i128 %x, i128 %y, i128 %z) nounwind {
-; CHECK-LABEL: fshl_i128:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    tst x4, #0x40
-; CHECK-NEXT:    mvn w11, w4
-; CHECK-NEXT:    csel x8, x3, x0, ne
-; CHECK-NEXT:    csel x9, x2, x3, ne
-; CHECK-NEXT:    csel x12, x0, x1, ne
-; CHECK-NEXT:    lsr x9, x9, #1
-; CHECK-NEXT:    lsr x10, x8, #1
-; CHECK-NEXT:    lsl x8, x8, x4
-; CHECK-NEXT:    lsl x12, x12, x4
-; CHECK-NEXT:    lsr x9, x9, x11
-; CHECK-NEXT:    lsr x10, x10, x11
-; CHECK-NEXT:    orr x0, x8, x9
-; CHECK-NEXT:    orr x1, x12, x10
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fshl_i128:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    tst x4, #0x40
+; CHECK-SD-NEXT:    mvn w11, w4
+; CHECK-SD-NEXT:    csel x8, x3, x0, ne
+; CHECK-SD-NEXT:    csel x9, x2, x3, ne
+; CHECK-SD-NEXT:    csel x12, x0, x1, ne
+; CHECK-SD-NEXT:    lsr x9, x9, #1
+; CHECK-SD-NEXT:    lsr x10, x8, #1
+; CHECK-SD-NEXT:    lsl x8, x8, x4
+; CHECK-SD-NEXT:    lsl x12, x12, x4
+; CHECK-SD-NEXT:    lsr x9, x9, x11
+; CHECK-SD-NEXT:    lsr x10, x10, x11
+; CHECK-SD-NEXT:    orr x0, x8, x9
+; CHECK-SD-NEXT:    orr x1, x12, x10
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fshl_i128:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    and x9, x4, #0x7f
+; CHECK-GI-NEXT:    mov w10, #64 // =0x40
+; CHECK-GI-NEXT:    lsl x14, x3, #63
+; CHECK-GI-NEXT:    sub x12, x10, x9
+; CHECK-GI-NEXT:    lsl x13, x1, x9
+; CHECK-GI-NEXT:    mov w8, #127 // =0x7f
+; CHECK-GI-NEXT:    lsr x12, x0, x12
+; CHECK-GI-NEXT:    bic x8, x8, x4
+; CHECK-GI-NEXT:    sub x15, x9, #64
+; CHECK-GI-NEXT:    cmp x9, #64
+; CHECK-GI-NEXT:    lsl x9, x0, x9
+; CHECK-GI-NEXT:    lsl x15, x0, x15
+; CHECK-GI-NEXT:    orr x12, x12, x13
+; CHECK-GI-NEXT:    orr x13, x14, x2, lsr #1
+; CHECK-GI-NEXT:    lsr x14, x3, #1
+; CHECK-GI-NEXT:    sub x10, x10, x8
+; CHECK-GI-NEXT:    sub x16, x8, #64
+; CHECK-GI-NEXT:    csel x9, x9, xzr, lo
+; CHECK-GI-NEXT:    lsr x17, x13, x8
+; CHECK-GI-NEXT:    lsl x10, x14, x10
+; CHECK-GI-NEXT:    csel x12, x12, x15, lo
+; CHECK-GI-NEXT:    tst x4, #0x7f
+; CHECK-GI-NEXT:    lsr x15, x14, x16
+; CHECK-GI-NEXT:    mvn x11, x4
+; CHECK-GI-NEXT:    csel x12, x1, x12, eq
+; CHECK-GI-NEXT:    orr x10, x17, x10
+; CHECK-GI-NEXT:    cmp x8, #64
+; CHECK-GI-NEXT:    lsr x14, x14, x8
+; CHECK-GI-NEXT:    csel x10, x10, x15, lo
+; CHECK-GI-NEXT:    tst x11, #0x7f
+; CHECK-GI-NEXT:    csel x10, x13, x10, eq
+; CHECK-GI-NEXT:    cmp x8, #64
+; CHECK-GI-NEXT:    csel x8, x14, xzr, lo
+; CHECK-GI-NEXT:    orr x0, x9, x10
+; CHECK-GI-NEXT:    orr x1, x12, x8
+; CHECK-GI-NEXT:    ret
   %f = call i128 @llvm.fshl.i128(i128 %x, i128 %y, i128 %z)
   ret i128 %f
 }
@@ -67,22 +129,38 @@ define i128 @fshl_i128(i128 %x, i128 %y, i128 %z) nounwind {
 ; Verify that weird types are minimally supported.
 declare i37 @llvm.fshl.i37(i37, i37, i37)
 define i37 @fshl_i37(i37 %x, i37 %y, i37 %z) {
-; CHECK-LABEL: fshl_i37:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x9, #46053 // =0xb3e5
-; CHECK-NEXT:    and x8, x2, #0x1fffffffff
-; CHECK-NEXT:    movk x9, #12398, lsl #16
-; CHECK-NEXT:    movk x9, #15941, lsl #32
-; CHECK-NEXT:    movk x9, #1771, lsl #48
-; CHECK-NEXT:    umulh x8, x8, x9
-; CHECK-NEXT:    mov w9, #37 // =0x25
-; CHECK-NEXT:    msub w8, w8, w9, w2
-; CHECK-NEXT:    ubfiz x9, x1, #26, #37
-; CHECK-NEXT:    mvn w10, w8
-; CHECK-NEXT:    lsl x8, x0, x8
-; CHECK-NEXT:    lsr x9, x9, x10
-; CHECK-NEXT:    orr x0, x8, x9
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fshl_i37:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov x9, #46053 // =0xb3e5
+; CHECK-SD-NEXT:    and x8, x2, #0x1fffffffff
+; CHECK-SD-NEXT:    movk x9, #12398, lsl #16
+; CHECK-SD-NEXT:    movk x9, #15941, lsl #32
+; CHECK-SD-NEXT:    movk x9, #1771, lsl #48
+; CHECK-SD-NEXT:    umulh x8, x8, x9
+; CHECK-SD-NEXT:    mov w9, #37 // =0x25
+; CHECK-SD-NEXT:    msub w8, w8, w9, w2
+; CHECK-SD-NEXT:    ubfiz x9, x1, #26, #37
+; CHECK-SD-NEXT:    mvn w10, w8
+; CHECK-SD-NEXT:    lsl x8, x0, x8
+; CHECK-SD-NEXT:    lsr x9, x9, x10
+; CHECK-SD-NEXT:    orr x0, x8, x9
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fshl_i37:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #37 // =0x25
+; CHECK-GI-NEXT:    and x9, x2, #0x1fffffffff
+; CHECK-GI-NEXT:    udiv x10, x9, x8
+; CHECK-GI-NEXT:    msub x8, x10, x8, x9
+; CHECK-GI-NEXT:    mov w9, #36 // =0x24
+; CHECK-GI-NEXT:    ubfx x10, x1, #1, #36
+; CHECK-GI-NEXT:    sub x9, x9, x8
+; CHECK-GI-NEXT:    and x8, x8, #0x1fffffffff
+; CHECK-GI-NEXT:    and x9, x9, #0x1fffffffff
+; CHECK-GI-NEXT:    lsl x8, x0, x8
+; CHECK-GI-NEXT:    lsr x9, x10, x9
+; CHECK-GI-NEXT:    orr x0, x8, x9
+; CHECK-GI-NEXT:    ret
   %f = call i37 @llvm.fshl.i37(i37 %x, i37 %y, i37 %z)
   ret i37 %f
 }
@@ -100,19 +178,30 @@ define i7 @fshl_i7_const_fold() {
 }
 
 define i8 @fshl_i8_const_fold_overshift_1() {
-; CHECK-LABEL: fshl_i8_const_fold_overshift_1:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #128 // =0x80
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fshl_i8_const_fold_overshift_1:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w0, #128 // =0x80
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fshl_i8_const_fold_overshift_1:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w0, #-128 // =0xffffff80
+; CHECK-GI-NEXT:    ret
   %f = call i8 @llvm.fshl.i8(i8 255, i8 0, i8 15)
   ret i8 %f
 }
 
 define i8 @fshl_i8_const_fold_overshift_2() {
-; CHECK-LABEL: fshl_i8_const_fold_overshift_2:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #120 // =0x78
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fshl_i8_const_fold_overshift_2:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w0, #120 // =0x78
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fshl_i8_const_fold_overshift_2:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #15 // =0xf
+; CHECK-GI-NEXT:    lsl w0, w8, #3
+; CHECK-GI-NEXT:    ret
   %f = call i8 @llvm.fshl.i8(i8 15, i8 15, i8 11)
   ret i8 %f
 }
@@ -162,10 +251,15 @@ define i64 @fshl_i64_const_overshift(i64 %x, i64 %y) {
 ; This should work without any node-specific logic.
 
 define i8 @fshl_i8_const_fold() {
-; CHECK-LABEL: fshl_i8_const_fold:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #128 // =0x80
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fshl_i8_const_fold:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w0, #128 // =0x80
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fshl_i8_const_fold:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w0, #-128 // =0xffffff80
+; CHECK-GI-NEXT:    ret
   %f = call i8 @llvm.fshl.i8(i8 255, i8 0, i8 7)
   ret i8 %f
 }
@@ -175,28 +269,50 @@ define i8 @fshl_i8_const_fold() {
 ; General case - all operands can be variables.
 
 define i32 @fshr_i32(i32 %x, i32 %y, i32 %z) {
-; CHECK-LABEL: fshr_i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    lsl w8, w0, #1
-; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
-; CHECK-NEXT:    mvn w9, w2
-; CHECK-NEXT:    lsr w10, w1, w2
-; CHECK-NEXT:    lsl w8, w8, w9
-; CHECK-NEXT:    orr w0, w8, w10
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fshr_i32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    lsl w8, w0, #1
+; CHECK-SD-NEXT:    // kill: def $w2 killed $w2 def $x2
+; CHECK-SD-NEXT:    mvn w9, w2
+; CHECK-SD-NEXT:    lsr w10, w1, w2
+; CHECK-SD-NEXT:    lsl w8, w8, w9
+; CHECK-SD-NEXT:    orr w0, w8, w10
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fshr_i32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #31 // =0x1f
+; CHECK-GI-NEXT:    lsl w9, w0, #1
+; CHECK-GI-NEXT:    and w10, w2, #0x1f
+; CHECK-GI-NEXT:    bic w8, w8, w2
+; CHECK-GI-NEXT:    lsl w8, w9, w8
+; CHECK-GI-NEXT:    lsr w9, w1, w10
+; CHECK-GI-NEXT:    orr w0, w8, w9
+; CHECK-GI-NEXT:    ret
   %f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 %z)
   ret i32 %f
 }
 
 define i64 @fshr_i64(i64 %x, i64 %y, i64 %z) {
-; CHECK-LABEL: fshr_i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    lsl x8, x0, #1
-; CHECK-NEXT:    mvn w9, w2
-; CHECK-NEXT:    lsr x10, x1, x2
-; CHECK-NEXT:    lsl x8, x8, x9
-; CHECK-NEXT:    orr x0, x8, x10
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fshr_i64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    lsl x8, x0, #1
+; CHECK-SD-NEXT:    mvn w9, w2
+; CHECK-SD-NEXT:    lsr x10, x1, x2
+; CHECK-SD-NEXT:    lsl x8, x8, x9
+; CHECK-SD-NEXT:    orr x0, x8, x10
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fshr_i64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #63 // =0x3f
+; CHECK-GI-NEXT:    lsl x9, x0, #1
+; CHECK-GI-NEXT:    and x10, x2, #0x3f
+; CHECK-GI-NEXT:    bic x8, x8, x2
+; CHECK-GI-NEXT:    lsl x8, x9, x8
+; CHECK-GI-NEXT:    lsr x9, x1, x10
+; CHECK-GI-NEXT:    orr x0, x8, x9
+; CHECK-GI-NEXT:    ret
   %f = call i64 @llvm.fshr.i64(i64 %x, i64 %y, i64 %z)
   ret i64 %f
 }
@@ -204,24 +320,41 @@ define i64 @fshr_i64(i64 %x, i64 %y, i64 %z) {
 ; Verify that weird types are minimally supported.
 declare i37 @llvm.fshr.i37(i37, i37, i37)
 define i37 @fshr_i37(i37 %x, i37 %y, i37 %z) {
-; CHECK-LABEL: fshr_i37:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x9, #46053 // =0xb3e5
-; CHECK-NEXT:    and x8, x2, #0x1fffffffff
-; CHECK-NEXT:    lsl x10, x0, #1
-; CHECK-NEXT:    movk x9, #12398, lsl #16
-; CHECK-NEXT:    movk x9, #15941, lsl #32
-; CHECK-NEXT:    movk x9, #1771, lsl #48
-; CHECK-NEXT:    umulh x8, x8, x9
-; CHECK-NEXT:    mov w9, #37 // =0x25
-; CHECK-NEXT:    msub w8, w8, w9, w2
-; CHECK-NEXT:    lsl x9, x1, #27
-; CHECK-NEXT:    add w8, w8, #27
-; CHECK-NEXT:    mvn w11, w8
-; CHECK-NEXT:    lsr x8, x9, x8
-; CHECK-NEXT:    lsl x9, x10, x11
-; CHECK-NEXT:    orr x0, x9, x8
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fshr_i37:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov x9, #46053 // =0xb3e5
+; CHECK-SD-NEXT:    and x8, x2, #0x1fffffffff
+; CHECK-SD-NEXT:    lsl x10, x0, #1
+; CHECK-SD-NEXT:    movk x9, #12398, lsl #16
+; CHECK-SD-NEXT:    movk x9, #15941, lsl #32
+; CHECK-SD-NEXT:    movk x9, #1771, lsl #48
+; CHECK-SD-NEXT:    umulh x8, x8, x9
+; CHECK-SD-NEXT:    mov w9, #37 // =0x25
+; CHECK-SD-NEXT:    msub w8, w8, w9, w2
+; CHECK-SD-NEXT:    lsl x9, x1, #27
+; CHECK-SD-NEXT:    add w8, w8, #27
+; CHECK-SD-NEXT:    mvn w11, w8
+; CHECK-SD-NEXT:    lsr x8, x9, x8
+; CHECK-SD-NEXT:    lsl x9, x10, x11
+; CHECK-SD-NEXT:    orr x0, x9, x8
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fshr_i37:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #37 // =0x25
+; CHECK-GI-NEXT:    and x9, x2, #0x1fffffffff
+; CHECK-GI-NEXT:    and x11, x1, #0x1fffffffff
+; CHECK-GI-NEXT:    udiv x10, x9, x8
+; CHECK-GI-NEXT:    msub x8, x10, x8, x9
+; CHECK-GI-NEXT:    mov w9, #36 // =0x24
+; CHECK-GI-NEXT:    lsl x10, x0, #1
+; CHECK-GI-NEXT:    sub x9, x9, x8
+; CHECK-GI-NEXT:    and x8, x8, #0x1fffffffff
+; CHECK-GI-NEXT:    and x9, x9, #0x1fffffffff
+; CHECK-GI-NEXT:    lsr x8, x11, x8
+; CHECK-GI-NEXT:    lsl x9, x10, x9
+; CHECK-GI-NEXT:    orr x0, x9, x8
+; CHECK-GI-NEXT:    ret
   %f = call i37 @llvm.fshr.i37(i37 %x, i37 %y, i37 %z)
   ret i37 %f
 }
@@ -239,28 +372,45 @@ define i7 @fshr_i7_const_fold() {
 }
 
 define i8 @fshr_i8_const_fold_overshift_1() {
-; CHECK-LABEL: fshr_i8_const_fold_overshift_1:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #254 // =0xfe
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fshr_i8_const_fold_overshift_1:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w0, #254 // =0xfe
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fshr_i8_const_fold_overshift_1:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w0, #-2 // =0xfffffffe
+; CHECK-GI-NEXT:    ret
   %f = call i8 @llvm.fshr.i8(i8 255, i8 0, i8 15)
   ret i8 %f
 }
 
 define i8 @fshr_i8_const_fold_overshift_2() {
-; CHECK-LABEL: fshr_i8_const_fold_overshift_2:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #225 // =0xe1
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fshr_i8_const_fold_overshift_2:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w0, #225 // =0xe1
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fshr_i8_const_fold_overshift_2:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #15 // =0xf
+; CHECK-GI-NEXT:    lsl w9, w8, #5
+; CHECK-GI-NEXT:    orr w0, w9, w8, lsr #3
+; CHECK-GI-NEXT:    ret
   %f = call i8 @llvm.fshr.i8(i8 15, i8 15, i8 11)
   ret i8 %f
 }
 
 define i8 @fshr_i8_const_fold_overshift_3() {
-; CHECK-LABEL: fshr_i8_const_fold_overshift_3:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #255 // =0xff
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fshr_i8_const_fold_overshift_3:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w0, #255 // =0xff
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fshr_i8_const_fold_overshift_3:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w0, #-1 // =0xffffffff
+; CHECK-GI-NEXT:    ret
   %f = call i8 @llvm.fshr.i8(i8 0, i8 255, i8 8)
   ret i8 %f
 }
@@ -301,10 +451,15 @@ define i64 @fshr_i64_const_overshift(i64 %x, i64 %y) {
 ; This should work without any node-specific logic.
 
 define i8 @fshr_i8_const_fold() {
-; CHECK-LABEL: fshr_i8_const_fold:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #254 // =0xfe
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fshr_i8_const_fold:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w0, #254 // =0xfe
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fshr_i8_const_fold:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w0, #-2 // =0xfffffffe
+; CHECK-GI-NEXT:    ret
   %f = call i8 @llvm.fshr.i8(i8 255, i8 0, i8 7)
   ret i8 %f
 }
@@ -344,17 +499,30 @@ define <4 x i32> @fshr_v4i32_shift_by_bitwidth(<4 x i32> %x, <4 x i32> %y) {
 }
 
 define i32 @or_shl_fshl(i32 %x, i32 %y, i32 %s) {
-; CHECK-LABEL: or_shl_fshl:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, w2
-; CHECK-NEXT:    lsr w9, w1, #1
-; CHECK-NEXT:    lsl w10, w1, w2
-; CHECK-NEXT:    mvn w11, w2
-; CHECK-NEXT:    lsl w8, w0, w8
-; CHECK-NEXT:    lsr w9, w9, w11
-; CHECK-NEXT:    orr w8, w8, w10
-; CHECK-NEXT:    orr w0, w8, w9
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: or_shl_fshl:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, w2
+; CHECK-SD-NEXT:    lsr w9, w1, #1
+; CHECK-SD-NEXT:    lsl w10, w1, w2
+; CHECK-SD-NEXT:    mvn w11, w2
+; CHECK-SD-NEXT:    lsl w8, w0, w8
+; CHECK-SD-NEXT:    lsr w9, w9, w11
+; CHECK-SD-NEXT:    orr w8, w8, w10
+; CHECK-SD-NEXT:    orr w0, w8, w9
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: or_shl_fshl:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #31 // =0x1f
+; CHECK-GI-NEXT:    and w9, w2, #0x1f
+; CHECK-GI-NEXT:    lsr w10, w1, #1
+; CHECK-GI-NEXT:    lsl w11, w1, w2
+; CHECK-GI-NEXT:    bic w8, w8, w2
+; CHECK-GI-NEXT:    lsl w9, w0, w9
+; CHECK-GI-NEXT:    lsr w8, w10, w8
+; CHECK-GI-NEXT:    orr w9, w9, w11
+; CHECK-GI-NEXT:    orr w0, w9, w8
+; CHECK-GI-NEXT:    ret
   %shy = shl i32 %y, %s
   %fun = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %s)
   %or = or i32 %fun, %shy
@@ -376,17 +544,30 @@ define i32 @or_shl_rotl(i32 %x, i32 %y, i32 %s) {
 }
 
 define i32 @or_shl_fshl_commute(i32 %x, i32 %y, i32 %s) {
-; CHECK-LABEL: or_shl_fshl_commute:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, w2
-; CHECK-NEXT:    lsr w9, w1, #1
-; CHECK-NEXT:    lsl w10, w1, w2
-; CHECK-NEXT:    mvn w11, w2
-; CHECK-NEXT:    lsl w8, w0, w8
-; CHECK-NEXT:    lsr w9, w9, w11
-; CHECK-NEXT:    orr w8, w10, w8
-; CHECK-NEXT:    orr w0, w8, w9
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: or_shl_fshl_commute:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, w2
+; CHECK-SD-NEXT:    lsr w9, w1, #1
+; CHECK-SD-NEXT:    lsl w10, w1, w2
+; CHECK-SD-NEXT:    mvn w11, w2
+; CHECK-SD-NEXT:    lsl w8, w0, w8
+; CHECK-SD-NEXT:    lsr w9, w9, w11
+; CHECK-SD-NEXT:    orr w8, w10, w8
+; CHECK-SD-NEXT:    orr w0, w8, w9
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: or_shl_fshl_commute:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #31 // =0x1f
+; CHECK-GI-NEXT:    and w9, w2, #0x1f
+; CHECK-GI-NEXT:    lsr w10, w1, #1
+; CHECK-GI-NEXT:    lsl w11, w1, w2
+; CHECK-GI-NEXT:    bic w8, w8, w2
+; CHECK-GI-NEXT:    lsl w9, w0, w9
+; CHECK-GI-NEXT:    lsr w8, w10, w8
+; CHECK-GI-NEXT:    orr w9, w11, w9
+; CHECK-GI-NEXT:    orr w0, w9, w8
+; CHECK-GI-NEXT:    ret
   %shy = shl i32 %y, %s
   %fun = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %s)
   %or = or i32 %shy, %fun
@@ -408,17 +589,30 @@ define i32 @or_shl_rotl_commute(i32 %x, i32 %y, i32 %s) {
 }
 
 define i32 @or_lshr_fshr(i32 %x, i32 %y, i32 %s) {
-; CHECK-LABEL: or_lshr_fshr:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, w2
-; CHECK-NEXT:    lsl w9, w1, #1
-; CHECK-NEXT:    lsr w10, w1, w2
-; CHECK-NEXT:    lsr w8, w0, w8
-; CHECK-NEXT:    mvn w11, w2
-; CHECK-NEXT:    lsl w9, w9, w11
-; CHECK-NEXT:    orr w8, w8, w10
-; CHECK-NEXT:    orr w0, w9, w8
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: or_lshr_fshr:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, w2
+; CHECK-SD-NEXT:    lsl w9, w1, #1
+; CHECK-SD-NEXT:    lsr w10, w1, w2
+; CHECK-SD-NEXT:    lsr w8, w0, w8
+; CHECK-SD-NEXT:    mvn w11, w2
+; CHECK-SD-NEXT:    lsl w9, w9, w11
+; CHECK-SD-NEXT:    orr w8, w8, w10
+; CHECK-SD-NEXT:    orr w0, w9, w8
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: or_lshr_fshr:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #31 // =0x1f
+; CHECK-GI-NEXT:    and w9, w2, #0x1f
+; CHECK-GI-NEXT:    lsl w10, w1, #1
+; CHECK-GI-NEXT:    lsr w11, w1, w2
+; CHECK-GI-NEXT:    bic w8, w8, w2
+; CHECK-GI-NEXT:    lsr w9, w0, w9
+; CHECK-GI-NEXT:    lsl w8, w10, w8
+; CHECK-GI-NEXT:    orr w9, w9, w11
+; CHECK-GI-NEXT:    orr w0, w8, w9
+; CHECK-GI-NEXT:    ret
   %shy = lshr i32 %y, %s
   %fun = call i32 @llvm.fshr.i32(i32 %y, i32 %x, i32 %s)
   %or = or i32 %fun, %shy
@@ -439,17 +633,30 @@ define i32 @or_lshr_rotr(i32 %x, i32 %y, i32 %s) {
 }
 
 define i32 @or_lshr_fshr_commute(i32 %x, i32 %y, i32 %s) {
-; CHECK-LABEL: or_lshr_fshr_commute:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, w2
-; CHECK-NEXT:    lsl w9, w1, #1
-; CHECK-NEXT:    lsr w10, w1, w2
-; CHECK-NEXT:    lsr w8, w0, w8
-; CHECK-NEXT:    mvn w11, w2
-; CHECK-NEXT:    lsl w9, w9, w11
-; CHECK-NEXT:    orr w8, w10, w8
-; CHECK-NEXT:    orr w0, w8, w9
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: or_lshr_fshr_commute:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, w2
+; CHECK-SD-NEXT:    lsl w9, w1, #1
+; CHECK-SD-NEXT:    lsr w10, w1, w2
+; CHECK-SD-NEXT:    lsr w8, w0, w8
+; CHECK-SD-NEXT:    mvn w11, w2
+; CHECK-SD-NEXT:    lsl w9, w9, w11
+; CHECK-SD-NEXT:    orr w8, w10, w8
+; CHECK-SD-NEXT:    orr w0, w8, w9
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: or_lshr_fshr_commute:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #31 // =0x1f
+; CHECK-GI-NEXT:    and w9, w2, #0x1f
+; CHECK-GI-NEXT:    lsl w10, w1, #1
+; CHECK-GI-NEXT:    lsr w11, w1, w2
+; CHECK-GI-NEXT:    bic w8, w8, w2
+; CHECK-GI-NEXT:    lsr w9, w0, w9
+; CHECK-GI-NEXT:    lsl w8, w10, w8
+; CHECK-GI-NEXT:    orr w9, w11, w9
+; CHECK-GI-NEXT:    orr w0, w9, w8
+; CHECK-GI-NEXT:    ret
   %shy = lshr i32 %y, %s
   %fun = call i32 @llvm.fshr.i32(i32 %y, i32 %x, i32 %s)
   %or = or i32 %shy, %fun
@@ -470,15 +677,28 @@ define i32 @or_lshr_rotr_commute(i32 %x, i32 %y, i32 %s) {
 }
 
 define i32 @or_shl_fshl_simplify(i32 %x, i32 %y, i32 %s) {
-; CHECK-LABEL: or_shl_fshl_simplify:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    lsr w8, w0, #1
-; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
-; CHECK-NEXT:    mvn w9, w2
-; CHECK-NEXT:    lsl w10, w1, w2
-; CHECK-NEXT:    lsr w8, w8, w9
-; CHECK-NEXT:    orr w0, w10, w8
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: or_shl_fshl_simplify:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    lsr w8, w0, #1
+; CHECK-SD-NEXT:    // kill: def $w2 killed $w2 def $x2
+; CHECK-SD-NEXT:    mvn w9, w2
+; CHECK-SD-NEXT:    lsl w10, w1, w2
+; CHECK-SD-NEXT:    lsr w8, w8, w9
+; CHECK-SD-NEXT:    orr w0, w10, w8
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: or_shl_fshl_simplify:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #31 // =0x1f
+; CHECK-GI-NEXT:    and w9, w2, #0x1f
+; CHECK-GI-NEXT:    lsr w10, w0, #1
+; CHECK-GI-NEXT:    lsl w11, w1, w2
+; CHECK-GI-NEXT:    bic w8, w8, w2
+; CHECK-GI-NEXT:    lsl w9, w1, w9
+; CHECK-GI-NEXT:    lsr w8, w10, w8
+; CHECK-GI-NEXT:    orr w9, w9, w11
+; CHECK-GI-NEXT:    orr w0, w9, w8
+; CHECK-GI-NEXT:    ret
   %shy = shl i32 %y, %s
   %fun = call i32 @llvm.fshl.i32(i32 %y, i32 %x, i32 %s)
   %or = or i32 %fun, %shy
@@ -486,15 +706,28 @@ define i32 @or_shl_fshl_simplify(i32 %x, i32 %y, i32 %s) {
 }
 
 define i32 @or_lshr_fshr_simplify(i32 %x, i32 %y, i32 %s) {
-; CHECK-LABEL: or_lshr_fshr_simplify:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    lsl w8, w0, #1
-; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
-; CHECK-NEXT:    mvn w9, w2
-; CHECK-NEXT:    lsr w10, w1, w2
-; CHECK-NEXT:    lsl w8, w8, w9
-; CHECK-NEXT:    orr w0, w8, w10
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: or_lshr_fshr_simplify:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    lsl w8, w0, #1
+; CHECK-SD-NEXT:    // kill: def $w2 killed $w2 def $x2
+; CHECK-SD-NEXT:    mvn w9, w2
+; CHECK-SD-NEXT:    lsr w10, w1, w2
+; CHECK-SD-NEXT:    lsl w8, w8, w9
+; CHECK-SD-NEXT:    orr w0, w8, w10
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: or_lshr_fshr_simplify:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #31 // =0x1f
+; CHECK-GI-NEXT:    and w9, w2, #0x1f
+; CHECK-GI-NEXT:    lsl w10, w0, #1
+; CHECK-GI-NEXT:    lsr w11, w1, w2
+; CHECK-GI-NEXT:    bic w8, w8, w2
+; CHECK-GI-NEXT:    lsr w9, w1, w9
+; CHECK-GI-NEXT:    lsl w8, w10, w8
+; CHECK-GI-NEXT:    orr w9, w11, w9
+; CHECK-GI-NEXT:    orr w0, w9, w8
+; CHECK-GI-NEXT:    ret
   %shy = lshr i32 %y, %s
   %fun = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 %s)
   %or = or i32 %shy, %fun


        

