[llvm] d7d85f7 - [AArch64][GlobalISel] Fix width value for G_SBFX/G_UBFX
Brendon Cahoon via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 29 19:05:21 PDT 2021
Author: Brendon Cahoon
Date: 2021-04-29T21:54:19-04:00
New Revision: d7d85f72ef9b45b45472611196f6b97305832b9a
URL: https://github.com/llvm/llvm-project/commit/d7d85f72ef9b45b45472611196f6b97305832b9a
DIFF: https://github.com/llvm/llvm-project/commit/d7d85f72ef9b45b45472611196f6b97305832b9a.diff
LOG: [AArch64][GlobalISel] Fix width value for G_SBFX/G_UBFX
When creating G_SBFX/G_UBFX opcodes, the last operand is the
width instead of the bit position. The bit position is used
for the AArch64 SBFM and UBFM instructions. The bit position
is converted to a width if the SBFX/UBFX aliases are generated.
For other SBFM/UBFM aliases, such as shifts, the bit position
is used.
Differential Revision: https://reviews.llvm.org/D101543
Added:
Modified:
llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-sextinreg.mir
llvm/test/CodeGen/AArch64/GlobalISel/select-sbfx.mir
llvm/test/CodeGen/AArch64/GlobalISel/select-ubfx.mir
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 4d3ffb61f741e..047f3b69b7239 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -2326,7 +2326,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
auto BitfieldInst =
MIB.buildInstr(Opc, {I.getOperand(0)}, {I.getOperand(1)})
.addImm(LSB)
- .addImm(Width);
+ .addImm(LSB + Width - 1);
I.eraseFromParent();
return constrainSelectedInstRegOperands(*BitfieldInst, TII, TRI, RBI);
}
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
index f770dbdb10844..753b380faf44d 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
@@ -262,7 +262,7 @@ static bool matchBitfieldExtractFromSExtInReg(
return false;
MatchInfo = [=](MachineIRBuilder &B) {
auto Cst1 = B.buildConstant(Ty, ShiftImm);
- auto Cst2 = B.buildConstant(Ty, ShiftImm + Width - 1);
+ auto Cst2 = B.buildConstant(Ty, Width);
B.buildInstr(TargetOpcode::G_SBFX, {Dst}, {ShiftSrc, Cst1, Cst2});
};
return true;
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-sextinreg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-sextinreg.mir
index 4fe777acb79ab..e79939ffcb9fc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-sextinreg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-sextinreg.mir
@@ -15,7 +15,7 @@ body: |
; CHECK: liveins: $w0
; CHECK: %x:_(s32) = COPY $w0
; CHECK: %lsb:_(s32) = G_CONSTANT i32 5
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 14
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
; CHECK: %sext_inreg:_(s32) = G_SBFX %x, %lsb(s32), [[C]]
; CHECK: $w0 = COPY %sext_inreg(s32)
; CHECK: RET_ReallyLR implicit $w0
@@ -38,7 +38,7 @@ body: |
; CHECK: liveins: $w0
; CHECK: %x:_(s32) = COPY $w0
; CHECK: %lsb:_(s32) = G_CONSTANT i32 5
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 14
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
; CHECK: %sext_inreg:_(s32) = G_SBFX %x, %lsb(s32), [[C]]
; CHECK: $w0 = COPY %sext_inreg(s32)
; CHECK: RET_ReallyLR implicit $w0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-sbfx.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-sbfx.mir
index daae44e5fec59..ae11f1b1209a9 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-sbfx.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-sbfx.mir
@@ -13,7 +13,7 @@ body: |
; CHECK-LABEL: name: sbfx_s32
; CHECK: liveins: $w0
; CHECK: %copy:gpr32 = COPY $w0
- ; CHECK: %sbfx:gpr32 = SBFMWri %copy, 0, 10
+ ; CHECK: %sbfx:gpr32 = SBFMWri %copy, 0, 9
; CHECK: $w0 = COPY %sbfx
; CHECK: RET_ReallyLR implicit $w0
%copy:gpr(s32) = COPY $w0
@@ -35,7 +35,7 @@ body: |
; CHECK-LABEL: name: sbfx_s64
; CHECK: liveins: $x0
; CHECK: %copy:gpr64 = COPY $x0
- ; CHECK: %sbfx:gpr64 = SBFMXri %copy, 0, 10
+ ; CHECK: %sbfx:gpr64 = SBFMXri %copy, 0, 9
; CHECK: $x0 = COPY %sbfx
; CHECK: RET_ReallyLR implicit $x0
%copy:gpr(s64) = COPY $x0
@@ -47,7 +47,7 @@ body: |
...
---
-name: sbfx_s32_31_31
+name: sbfx_s32_31_1
legalized: true
regBankSelected: true
tracksRegLiveness: true
@@ -56,7 +56,7 @@ body: |
liveins: $w0
; This is just an asr, so it's okay.
- ; CHECK-LABEL: name: sbfx_s32_31_31
+ ; CHECK-LABEL: name: sbfx_s32_31_1
; CHECK: liveins: $w0
; CHECK: %copy:gpr32 = COPY $w0
; CHECK: %sbfx:gpr32 = SBFMWri %copy, 31, 31
@@ -64,7 +64,47 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%copy:gpr(s32) = COPY $w0
%cst1:gpr(s32) = G_CONSTANT i32 31
- %cst2:gpr(s32) = G_CONSTANT i32 31
+ %cst2:gpr(s32) = G_CONSTANT i32 1
%sbfx:gpr(s32) = G_SBFX %copy, %cst1, %cst2
$w0 = COPY %sbfx
RET_ReallyLR implicit $w0
+---
+name: sbfx_s32_10_5
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0
+ ; CHECK-LABEL: name: sbfx_s32_10_5
+ ; CHECK: liveins: $w0
+ ; CHECK: %copy:gpr32 = COPY $w0
+ ; CHECK: %sbfx:gpr32 = SBFMWri %copy, 10, 14
+ ; CHECK: $w0 = COPY %sbfx
+ ; CHECK: RET_ReallyLR implicit $w0
+ %copy:gpr(s32) = COPY $w0
+ %cst1:gpr(s32) = G_CONSTANT i32 10
+ %cst2:gpr(s32) = G_CONSTANT i32 5
+ %sbfx:gpr(s32) = G_SBFX %copy, %cst1, %cst2
+ $w0 = COPY %sbfx
+ RET_ReallyLR implicit $w0
+---
+name: sbfx_s64_10_5
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x0
+ ; CHECK-LABEL: name: sbfx_s64_10_5
+ ; CHECK: liveins: $x0
+ ; CHECK: %copy:gpr64 = COPY $x0
+ ; CHECK: %sbfx:gpr64 = SBFMXri %copy, 10, 14
+ ; CHECK: $x0 = COPY %sbfx
+ ; CHECK: RET_ReallyLR implicit $x0
+ %copy:gpr(s64) = COPY $x0
+ %cst1:gpr(s64) = G_CONSTANT i64 10
+ %cst2:gpr(s64) = G_CONSTANT i64 5
+ %sbfx:gpr(s64) = G_SBFX %copy, %cst1, %cst2
+ $x0 = COPY %sbfx
+ RET_ReallyLR implicit $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-ubfx.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-ubfx.mir
index 3f629b69b6b0c..c15b668c5aa76 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-ubfx.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-ubfx.mir
@@ -13,7 +13,7 @@ body: |
; CHECK-LABEL: name: ubfx_s32
; CHECK: liveins: $w0
; CHECK: %copy:gpr32 = COPY $w0
- ; CHECK: %ubfx:gpr32 = UBFMWri %copy, 0, 10
+ ; CHECK: %ubfx:gpr32 = UBFMWri %copy, 0, 9
; CHECK: $w0 = COPY %ubfx
; CHECK: RET_ReallyLR implicit $w0
%copy:gpr(s32) = COPY $w0
@@ -35,7 +35,7 @@ body: |
; CHECK-LABEL: name: ubfx_s64
; CHECK: liveins: $x0
; CHECK: %copy:gpr64 = COPY $x0
- ; CHECK: %ubfx:gpr64 = UBFMXri %copy, 0, 10
+ ; CHECK: %ubfx:gpr64 = UBFMXri %copy, 0, 9
; CHECK: $x0 = COPY %ubfx
; CHECK: RET_ReallyLR implicit $x0
%copy:gpr(s64) = COPY $x0
@@ -47,7 +47,7 @@ body: |
...
---
-name: ubfx_s32_31_31
+name: ubfx_s32_31_1
legalized: true
regBankSelected: true
tracksRegLiveness: true
@@ -57,7 +57,7 @@ body: |
; This is just a lsr, so it's okay.
- ; CHECK-LABEL: name: ubfx_s32_31_31
+ ; CHECK-LABEL: name: ubfx_s32_31_1
; CHECK: liveins: $w0
; CHECK: %copy:gpr32 = COPY $w0
; CHECK: %ubfx:gpr32 = UBFMWri %copy, 31, 31
@@ -65,7 +65,49 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%copy:gpr(s32) = COPY $w0
%cst1:gpr(s32) = G_CONSTANT i32 31
- %cst2:gpr(s32) = G_CONSTANT i32 31
+ %cst2:gpr(s32) = G_CONSTANT i32 1
%ubfx:gpr(s32) = G_UBFX %copy, %cst1, %cst2
$w0 = COPY %ubfx
RET_ReallyLR implicit $w0
+---
+name: ubfx_s32_10_5
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0
+ ; CHECK-LABEL: name: ubfx_s32_10_5
+ ; CHECK: liveins: $w0
+ ; CHECK: %copy:gpr32 = COPY $w0
+ ; CHECK: %ubfx:gpr32 = UBFMWri %copy, 10, 14
+ ; CHECK: $w0 = COPY %ubfx
+ ; CHECK: RET_ReallyLR implicit $w0
+ %copy:gpr(s32) = COPY $w0
+ %cst1:gpr(s32) = G_CONSTANT i32 10
+ %cst2:gpr(s32) = G_CONSTANT i32 5
+ %ubfx:gpr(s32) = G_UBFX %copy, %cst1, %cst2
+ $w0 = COPY %ubfx
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: ubfx_s64_10_5
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x0
+ ; CHECK-LABEL: name: ubfx_s64_10_5
+ ; CHECK: liveins: $x0
+ ; CHECK: %copy:gpr64 = COPY $x0
+ ; CHECK: %ubfx:gpr64 = UBFMXri %copy, 10, 14
+ ; CHECK: $x0 = COPY %ubfx
+ ; CHECK: RET_ReallyLR implicit $x0
+ %copy:gpr(s64) = COPY $x0
+ %cst1:gpr(s64) = G_CONSTANT i64 10
+ %cst2:gpr(s64) = G_CONSTANT i64 5
+ %ubfx:gpr(s64) = G_UBFX %copy, %cst1, %cst2
+ $x0 = COPY %ubfx
+ RET_ReallyLR implicit $x0
More information about the llvm-commits
mailing list