[llvm] b1a6b2c - [AArch64][GlobalISel] Fix miscompile on carry-in selection (#68840)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Oct 19 10:50:50 PDT 2023
Author: Tobias Stadler
Date: 2023-10-19T19:50:46+02:00
New Revision: b1a6b2cc409e5d54080638ee2df5d8fea698dee2
URL: https://github.com/llvm/llvm-project/commit/b1a6b2cc409e5d54080638ee2df5d8fea698dee2
DIFF: https://github.com/llvm/llvm-project/commit/b1a6b2cc409e5d54080638ee2df5d8fea698dee2.diff
LOG: [AArch64][GlobalISel] Fix miscompile on carry-in selection (#68840)
Eliding the vReg to NZCV conversion instruction for G_UADDE/... is illegal if
it causes the carry-generating instruction to become dead, because ISel
will just remove the dead instruction.
I accidentally introduced this here: https://reviews.llvm.org/D153164.
As far as I can tell, this is not exposed on the default clang settings,
because on O0 there is always a G_AND between boolean defs and uses, so
the optimization doesn't apply. Thus, when I tried to commit
https://reviews.llvm.org/D159140, which removes these G_ANDs on O0, I
broke some UBSan tests.
We fix this by recursively selecting the previous (NZCV-setting) instruction before continuing selection for the current instruction.
Added:
Modified:
llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
llvm/test/CodeGen/AArch64/GlobalISel/select-sadde.mir
llvm/test/CodeGen/AArch64/GlobalISel/select-ssube.mir
llvm/test/CodeGen/AArch64/GlobalISel/select-uadde.mir
llvm/test/CodeGen/AArch64/GlobalISel/select-usube.mir
Removed:
################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index bf41c19cd6cc726..3409d441704ca17 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -298,6 +298,9 @@ class MachineIRBuilder {
/// Getter for the State
MachineIRBuilderState &getState() { return State; }
+ /// Setter for the State
+ void setState(const MachineIRBuilderState &NewState) { State = NewState; }
+
/// Getter for the basic block we currently build.
const MachineBasicBlock &getMBB() const {
assert(State.MBB && "MachineBasicBlock is not set");
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 1c7a09696e853e2..f4ef6cffdfa4193 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -102,6 +102,11 @@ class AArch64InstructionSelector : public InstructionSelector {
// An early selection function that runs before the selectImpl() call.
bool earlySelect(MachineInstr &I);
+ /// Save state that is shared between select calls, call select on \p I and
+ /// then restore the saved state. This can be used to recursively call select
+ /// within a select call.
+ bool selectAndRestoreState(MachineInstr &I);
+
// Do some preprocessing of G_PHIs before we begin selection.
void processPHIs(MachineFunction &MF);
@@ -3552,6 +3557,13 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
return false;
}
+bool AArch64InstructionSelector::selectAndRestoreState(MachineInstr &I) {
+ MachineIRBuilderState OldMIBState = MIB.getState();
+ bool Success = select(I);
+ MIB.setState(OldMIBState);
+ return Success;
+}
+
bool AArch64InstructionSelector::selectReduction(MachineInstr &I,
MachineRegisterInfo &MRI) {
Register VecReg = I.getOperand(1).getReg();
@@ -4749,11 +4761,17 @@ MachineInstr *AArch64InstructionSelector::emitCarryIn(MachineInstr &I,
// emit a carry generating instruction. E.g. for G_UADDE/G_USUBE sequences
// generated during legalization of wide add/sub. This optimization depends on
// these sequences not being interrupted by other instructions.
+ // We have to select the previous instruction before the carry-using
+ // instruction is deleted by the calling function, otherwise the previous
+ // instruction might become dead and would get deleted.
MachineInstr *SrcMI = MRI->getVRegDef(CarryReg);
if (SrcMI == I.getPrevNode()) {
if (auto *CarrySrcMI = dyn_cast<GAddSubCarryOut>(SrcMI)) {
bool ProducesNegatedCarry = CarrySrcMI->isSub();
- if (NeedsNegatedCarry == ProducesNegatedCarry && CarrySrcMI->isUnsigned())
+ if (NeedsNegatedCarry == ProducesNegatedCarry &&
+ CarrySrcMI->isUnsigned() &&
+ CarrySrcMI->getCarryOutReg() == CarryReg &&
+ selectAndRestoreState(*SrcMI))
return nullptr;
}
}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-sadde.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-sadde.mir
index 85625ced4ba6922..e98ab4af570978d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-sadde.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-sadde.mir
@@ -175,3 +175,34 @@ body: |
$x2 = COPY %9(s64)
RET_ReallyLR implicit $x0, implicit $x1, implicit $x2
...
+...
+---
+name: sadde_opt_prev_dead
+alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x0, $x1, $x2, $x3
+ ; CHECK-LABEL: name: sadde_opt_prev_dead
+ ; CHECK: liveins: $x0, $x1, $x2, $x3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $x2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr64 = COPY $x3
+ ; CHECK-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr [[COPY]], [[COPY2]], implicit-def $nzcv
+ ; CHECK-NEXT: [[ADCSXr:%[0-9]+]]:gpr64 = ADCSXr [[COPY1]], [[COPY3]], implicit-def $nzcv, implicit $nzcv
+ ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
+ ; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = COPY $x1
+ %2:gpr(s64) = COPY $x2
+ %3:gpr(s64) = COPY $x3
+ %4:gpr(s64), %5:gpr(s32) = G_UADDO %0, %2
+ %6:gpr(s64), %7:gpr(s32) = G_SADDE %1, %3, %5
+ $w0 = COPY %7(s32)
+ RET_ReallyLR implicit $w0
+...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-ssube.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-ssube.mir
index 00bd26cc0220d2b..a9da51781efbe0e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-ssube.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-ssube.mir
@@ -175,3 +175,34 @@ body: |
$x2 = COPY %9(s64)
RET_ReallyLR implicit $x0, implicit $x1, implicit $x2
...
+...
+---
+name: ssube_opt_prev_dead
+alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x0, $x1, $x2, $x3
+ ; CHECK-LABEL: name: ssube_opt_prev_dead
+ ; CHECK: liveins: $x0, $x1, $x2, $x3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $x2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr64 = COPY $x3
+ ; CHECK-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[COPY2]], implicit-def $nzcv
+ ; CHECK-NEXT: [[SBCSXr:%[0-9]+]]:gpr64 = SBCSXr [[COPY1]], [[COPY3]], implicit-def $nzcv, implicit $nzcv
+ ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
+ ; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = COPY $x1
+ %2:gpr(s64) = COPY $x2
+ %3:gpr(s64) = COPY $x3
+ %4:gpr(s64), %5:gpr(s32) = G_USUBO %0, %2
+ %6:gpr(s64), %7:gpr(s32) = G_SSUBE %1, %3, %5
+ $w0 = COPY %7(s32)
+ RET_ReallyLR implicit $w0
+...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-uadde.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-uadde.mir
index dc80d0c9abc252e..f6f9964e6babd34 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-uadde.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-uadde.mir
@@ -175,3 +175,34 @@ body: |
$x2 = COPY %9(s64)
RET_ReallyLR implicit $x0, implicit $x1, implicit $x2
...
+...
+---
+name: uadde_opt_prev_dead
+alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x0, $x1, $x2, $x3
+ ; CHECK-LABEL: name: uadde_opt_prev_dead
+ ; CHECK: liveins: $x0, $x1, $x2, $x3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $x2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr64 = COPY $x3
+ ; CHECK-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr [[COPY]], [[COPY2]], implicit-def $nzcv
+ ; CHECK-NEXT: [[ADCSXr:%[0-9]+]]:gpr64 = ADCSXr [[COPY1]], [[COPY3]], implicit-def $nzcv, implicit $nzcv
+ ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
+ ; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = COPY $x1
+ %2:gpr(s64) = COPY $x2
+ %3:gpr(s64) = COPY $x3
+ %4:gpr(s64), %5:gpr(s32) = G_UADDO %0, %2
+ %6:gpr(s64), %7:gpr(s32) = G_UADDE %1, %3, %5
+ $w0 = COPY %7(s32)
+ RET_ReallyLR implicit $w0
+...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-usube.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-usube.mir
index c532474fc67b4f7..fa8799653a570cc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-usube.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-usube.mir
@@ -175,3 +175,34 @@ body: |
$x2 = COPY %9(s64)
RET_ReallyLR implicit $x0, implicit $x1, implicit $x2
...
+...
+---
+name: usube_opt_prev_dead
+alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x0, $x1, $x2, $x3
+ ; CHECK-LABEL: name: usube_opt_prev_dead
+ ; CHECK: liveins: $x0, $x1, $x2, $x3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $x2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr64 = COPY $x3
+ ; CHECK-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[COPY2]], implicit-def $nzcv
+ ; CHECK-NEXT: [[SBCSXr:%[0-9]+]]:gpr64 = SBCSXr [[COPY1]], [[COPY3]], implicit-def $nzcv, implicit $nzcv
+ ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv
+ ; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = COPY $x1
+ %2:gpr(s64) = COPY $x2
+ %3:gpr(s64) = COPY $x3
+ %4:gpr(s64), %5:gpr(s32) = G_USUBO %0, %2
+ %6:gpr(s64), %7:gpr(s32) = G_USUBE %1, %3, %5
+ $w0 = COPY %7(s32)
+ RET_ReallyLR implicit $w0
+...
More information about the llvm-commits
mailing list