[llvm] [FastISel][AArch64] Compare Instruction Miscompilation Fix (PR #75993)

via llvm-commits <llvm-commits@lists.llvm.org>
Tue Dec 19 17:46:20 PST 2023


llvmbot wrote:


@llvm/pr-subscribers-backend-aarch64

Author: None (brendaso1)

Changes:

When a shl is folded into a compare instruction, a miscompilation occurs if the CMP operand is also sign-extended. For the following IR:

  %op3 = shl i8 %op2, 3
  %tmp3 = icmp eq i8 %tmp2, %op3

It used to generate

   cmp w8, w9, sxtb #3

which sign-extends the byte in w9, shifts the result left by 3, and then compares it with the value in w8. The intended semantics of the IR, however, are: shift the byte in w9 left by 3 (truncating to 8 bits), zero-extend the result, then compare it with the value in w8. This PR fixes the issue by not folding the left shift into the CMP instruction and by zero-extending the operand rather than sign-extending it.
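
Concretely, the two interpretations take different branches for the byte values reached in the test case added below. A minimal C++ sketch of the arithmetic (the program and its variable names are illustrative, not part of the patch; the constants are derived from the test's data):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Derived from the test below:
  //   tmp2 = A[2] = 0xC8
  //   op2  = ((0xAA ^ 0xCF) * (0xAA ^ 0xCF)) & 0xFF = (0x65 * 0x65) & 0xFF = 0xD9
  uint8_t tmp2 = 0xC8, op2 = 0xD9;

  // IR semantics: the i8 shl truncates to 8 bits, then the i8 values are
  // compared, i.e. both operands are effectively zero-extended.
  uint8_t op3 = uint8_t(op2 << 3);                 // 0xD9 << 3 = 0x6C8 -> 0xC8
  std::printf("intended:    %d\n", tmp2 == op3);   // 1 (equal: TRUE branch)

  // Folded "cmp w8, w9, sxtb #3": the byte is sign-extended FIRST and then
  // shifted, so the truncation to 8 bits never happens (* 8 models << 3).
  int32_t folded = int32_t(int8_t(op2)) * 8;       // sext(0xD9) << 3 = 0xFFFFFEC8
  std::printf("miscompiled: %d\n", int32_t(int8_t(tmp2)) == folded); // 0
}
```

With the fold, the compiled program takes the FALSE branch even though the i8 comparison is true.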


---
Full diff: https://github.com/llvm/llvm-project/pull/75993.diff


2 Files Affected:

- (modified) llvm/lib/Target/AArch64/AArch64FastISel.cpp (+6-2) 
- (added) llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll (+33) 


``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 9b8162ce8dd4d0..ddd2c4eebe491c 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -1233,7 +1233,11 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
       isValueAvailable(RHS)) {
     if (const auto *SI = dyn_cast<BinaryOperator>(RHS))
       if (const auto *C = dyn_cast<ConstantInt>(SI->getOperand(1)))
-        if ((SI->getOpcode() == Instruction::Shl) && (C->getZExtValue() < 4)) {
+        if ((SI->getOpcode() == Instruction::Shl) && (C->getZExtValue() < 4) &&
+            !NeedExtend) {
+          // We can only fold shifts into instructions that do not need
+          // extension, because in an add/sub instruction the operand is
+          // extended first and then shifted.
           Register RHSReg = getRegForValue(SI->getOperand(0));
           if (!RHSReg)
             return 0;
@@ -2426,7 +2430,7 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
       }
 
       // Emit the cmp.
-      if (!emitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
+      if (!emitCmp(CI->getOperand(0), CI->getOperand(1), !CI->isSigned()))
         return false;
 
       // FCMP_UEQ and FCMP_ONE cannot be checked with a single branch
diff --git a/llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll b/llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll
new file mode 100644
index 00000000000000..5dad290134bbd7
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll
@@ -0,0 +1,33 @@
+; RUN: llc --global-isel=false -fast-isel -O0 -mtriple=aarch64-none-none < %s | FileCheck %s
+
+; Check that the shl instruction does not get folded together with the
+; cmp instruction, which would result in a miscompilation.
+
+@A = dso_local global [5 x i8] c"\C8\AA\C8\AA\AA"
+@.str = private unnamed_addr constant [13 x i8] c"TRUE BRANCH\0A\00"
+@.str.1 = private unnamed_addr constant [14 x i8] c"FALSE BRANCH\0A\00"
+
+define dso_local i32 @main() {
+
+  %tmp = load i8, ptr getelementptr inbounds ([5 x i8], ptr @A, i64 0, i64 1)
+  %tmp2 = load i8, ptr getelementptr inbounds ([5 x i8], ptr @A, i64 0, i64 2)
+  %op1 = xor i8 %tmp, -49
+  %op2 = mul i8 %op1, %op1
+; CHECK-NOT: cmp [[REGS:.*]] #[[SHIFT_VAL:[0-9]+]]
+  %op3 = shl i8 %op2, 3
+  %tmp3 = icmp eq i8 %tmp2, %op3
+  br i1 %tmp3, label %_true, label %_false
+
+_true:
+  %res = call i32 (ptr, ...) @printf(ptr noundef @.str)
+  br label %_ret
+
+_false:
+  %res2 = call i32 (ptr, ...) @printf(ptr noundef @.str.1)
+  br label %_ret
+
+_ret:
+  ret i32 0
+}
+
+declare i32 @printf(ptr noundef, ...) 

``````````
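
One note on the second hunk, the change from `CI->isUnsigned()` to `!CI->isSigned()`: for equality predicates (`eq`/`ne`), `CmpInst::isUnsigned()` and `CmpInst::isSigned()` both return false, so the old call requested sign-extended operands for the `icmp eq` above. A minimal sketch of the distinction (the helper is illustrative, not part of the patch):

```cpp
#include "llvm/IR/Instructions.h"

// Illustrative helper, not from the patch: should emitCmp zero-extend its
// operands for this integer compare?
static bool wantsZeroExtend(const llvm::CmpInst *CI) {
  // isUnsigned() is true only for ugt/uge/ult/ule; isSigned() is true only
  // for sgt/sge/slt/sle. eq and ne are neither, so `CI->isUnsigned()`
  // selected sign extension for them, while `!CI->isSigned()` selects zero
  // extension, matching the truncating i8 shift semantics.
  return !CI->isSigned();
}
```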



https://github.com/llvm/llvm-project/pull/75993

