[llvm] [FastISel][AArch64] Compare Instruction Miscompilation Fix (PR #75993)

via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 25 04:36:55 PST 2023


https://github.com/brendaso1 updated https://github.com/llvm/llvm-project/pull/75993

>From 56575618c149c4a682be90b5d2fc91604b5179d7 Mon Sep 17 00:00:00 2001
From: Brenda So <mayyeebrenda_so at apple.com>
Date: Tue, 19 Dec 2023 14:44:12 -0800
Subject: [PATCH 1/3] [FastISel][AArch64] compare instruction miscompilation
 fix

When a shl is folded into a compare instruction whose operands are
also extended, a miscompilation occurs: in the resulting add/sub
(CMP) instruction the operand is extended first and then shifted,
whereas the IR shifts in the narrow type before extending. This fix
avoids folding the shl into the cmp when the second operand needs
extension, and also turns equality comparisons into unsigned
comparisons.
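
As an illustration of the mismatch (not part of the patch), here is a
minimal standalone C++ sketch; the values and variable names are
hypothetical:

  #include <cstdint>
  #include <cstdio>

  int main() {
    int8_t X = 0x25; // 37
    // IR semantics: shift within i8 (high bits dropped), then extend.
    int32_t ShiftThenExtend = (int32_t)(int8_t)(X << 3); // 40
    // Folded extended-register CMP: extend first, then shift left.
    int32_t ExtendThenShift = ((int32_t)X) << 3;         // 296
    std::printf("%d vs %d\n", ShiftThenExtend, ExtendThenShift);
    return 0;
  }

If the other compare operand is 40, the i8 equality holds, but the
folded form compares 40 against 296 and takes the wrong branch.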
---
 llvm/lib/Target/AArch64/AArch64FastISel.cpp   |  8 +++--
 .../CodeGen/AArch64/fastisel-shift-and-cmp.ll | 33 +++++++++++++++++++
 2 files changed, 39 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll

diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 9b8162ce8dd4d0..ddd2c4eebe491c 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -1233,7 +1233,11 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
       isValueAvailable(RHS)) {
     if (const auto *SI = dyn_cast<BinaryOperator>(RHS))
       if (const auto *C = dyn_cast<ConstantInt>(SI->getOperand(1)))
-        if ((SI->getOpcode() == Instruction::Shl) && (C->getZExtValue() < 4)) {
+        if ((SI->getOpcode() == Instruction::Shl) && (C->getZExtValue() < 4) &&
+            !NeedExtend) {
+          // We can only fold instructions that don't need extension, because
+          // in an add/sub instruction the operand is extended first and then
+          // shifted.
           Register RHSReg = getRegForValue(SI->getOperand(0));
           if (!RHSReg)
             return 0;
@@ -2426,7 +2430,7 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
       }
 
       // Emit the cmp.
-      if (!emitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
+      if (!emitCmp(CI->getOperand(0), CI->getOperand(1), !CI->isSigned()))
         return false;
 
       // FCMP_UEQ and FCMP_ONE cannot be checked with a single branch
diff --git a/llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll b/llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll
new file mode 100644
index 00000000000000..5dad290134bbd7
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll
@@ -0,0 +1,33 @@
+; RUN: llc --global-isel=false -fast-isel -O0 -mtriple=aarch64-none-none < %s | FileCheck %s
+
+; Check that the shl instruction does not get folded together with
+; the cmp instruction, as that would create a miscompilation.
+
+@A = dso_local global [5 x i8] c"\C8\AA\C8\AA\AA"
+@.str = private unnamed_addr constant [13 x i8] c"TRUE BRANCH\0A\00"
+@.str.1 = private unnamed_addr constant [14 x i8] c"FALSE BRANCH\0A\00"
+
+define dso_local i32 @main() {
+
+  %tmp = load i8, ptr getelementptr inbounds ([5 x i8], ptr @A, i64 0, i64 1)
+  %tmp2 = load i8, ptr getelementptr inbounds ([5 x i8], ptr @A, i64 0, i64 2)
+  %op1 = xor i8 %tmp, -49
+  %op2 = mul i8 %op1, %op1
+; CHECK-NOT: cmp [[REGS:.*]] #[[SHIFT_VAL:[0-9]+]]
+  %op3 = shl i8 %op2, 3
+  %tmp3 = icmp eq i8 %tmp2, %op3
+  br i1 %tmp3, label %_true, label %_false
+
+_true:
+  %res = call i32 (ptr, ...) @printf(ptr noundef @.str)
+  br label %_ret
+
+_false:
+  %res2 = call i32 (ptr, ...) @printf(ptr noundef @.str.1)
+  br label %_ret
+
+_ret:
+  ret i32 0
+}
+
+declare i32 @printf(ptr noundef, ...) 

>From cdc10d8cee9744706edab85e84feff2b1133801b Mon Sep 17 00:00:00 2001
From: Brenda So <mayyeebrenda_so at apple.com>
Date: Mon, 25 Dec 2023 16:23:58 +0800
Subject: [PATCH 2/3] Simplify test/CodeGen/AArch64/fastisel-shift-and-cmp.ll

- remove dso_local
- remove the globals
- remove the loads
- remove the calls
- remove --global-isel=false and -O0
---
 .../CodeGen/AArch64/fastisel-shift-and-cmp.ll | 32 ++++++-------------
 1 file changed, 10 insertions(+), 22 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll b/llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll
index 5dad290134bbd7..1067c0428348da 100644
--- a/llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll
+++ b/llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll
@@ -1,33 +1,21 @@
-; RUN: llc --global-isel=false -fast-isel -O0 -mtriple=aarch64-none-none < %s | FileCheck %s
+; RUN: llc -fast-isel -mtriple=aarch64-none-none < %s | FileCheck %s
 
 ; Check that the shl instruction does not get folded together with
 ; the cmp instruction, as that would create a miscompilation.
 
-@A = dso_local global [5 x i8] c"\C8\AA\C8\AA\AA"
-@.str = private unnamed_addr constant [13 x i8] c"TRUE BRANCH\0A\00"
-@.str.1 = private unnamed_addr constant [14 x i8] c"FALSE BRANCH\0A\00"
+define i32 @icmp_i8_shift_and_cmp(i8 %a, i8 %b) {
 
-define dso_local i32 @main() {
-
-  %tmp = load i8, ptr getelementptr inbounds ([5 x i8], ptr @A, i64 0, i64 1)
-  %tmp2 = load i8, ptr getelementptr inbounds ([5 x i8], ptr @A, i64 0, i64 2)
-  %op1 = xor i8 %tmp, -49
+  %op1 = xor i8 %a, -49
   %op2 = mul i8 %op1, %op1
 ; CHECK-NOT: cmp [[REGS:.*]] #[[SHIFT_VAL:[0-9]+]]
   %op3 = shl i8 %op2, 3
-  %tmp3 = icmp eq i8 %tmp2, %op3
-  br i1 %tmp3, label %_true, label %_false
-
-_true:
-  %res = call i32 (ptr, ...) @printf(ptr noundef @.str)
-  br label %_ret
-
-_false:
-  %res2 = call i32 (ptr, ...) @printf(ptr noundef @.str.1)
-  br label %_ret
+  %tmp3 = icmp eq i8 %b, %op3
+  %conv = zext i1 %tmp3 to i32
+  ret i32 %conv
+}
 
-_ret:
-  ret i32 0
+define dso_local i32 @main() local_unnamed_addr #0 {
+  %1 = tail call i32 @icmp_i8_shift_and_cmp(i32 noundef 170, i32 noundef 200)
+  ret i32 %1
 }
 
-declare i32 @printf(ptr noundef, ...) 

>From 965299998166d20480c3d0d32ed7077de1693530 Mon Sep 17 00:00:00 2001
From: Brenda So <mayyeebrenda_so at apple.com>
Date: Mon, 25 Dec 2023 20:36:30 +0800
Subject: [PATCH 3/3] removed extraneous function in unit test

---
 llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll b/llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll
index 1067c0428348da..55612e7bfad901 100644
--- a/llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll
+++ b/llvm/test/CodeGen/AArch64/fastisel-shift-and-cmp.ll
@@ -14,8 +14,3 @@ define i32 @icmp_i8_shift_and_cmp(i8 %a, i8 %b) {
   ret i32 %conv
 }
 
-define dso_local i32 @main() local_unnamed_addr #0 {
-  %1 = tail call i32 @icmp_i8_shift_and_cmp(i32 noundef 170, i32 noundef 200)
-  ret i32 %1
-}
-


