[llvm] 923f6ac - [FastISel][AArch64] Compare Instruction Miscompilation Fix (#75993)

via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 2 21:49:09 PST 2024


Author: brendaso1
Date: 2024-01-03T13:49:05+08:00
New Revision: 923f6ac018e3dd1c86bd4cee05e95680962e1446

URL: https://github.com/llvm/llvm-project/commit/923f6ac018e3dd1c86bd4cee05e95680962e1446
DIFF: https://github.com/llvm/llvm-project/commit/923f6ac018e3dd1c86bd4cee05e95680962e1446.diff

LOG: [FastISel][AArch64] Compare Instruction Miscompilation Fix (#75993)

When a shl is folded into a compare instruction, a miscompilation occurs
when the folded CMP operand is also sign-extended. For the following IR:

  %op3 = shl i8 %op2, 3
  %tmp3 = icmp eq i8 %tmp2, %op3

it used to generate

   cmp w8, w9, sxtb #3

which sign-extends w9, shifts it left by 3, and then compares the result
with the value in w8. However, the IR requires `%op2` to be shifted left
first, before the operands of the comparison are extended. Moreover, if
sign extension is used instead of zero extension, the sample test
miscompiles. This PR fixes the issue by not folding the left shift into
the CMP instruction and by creating a zero-extended value rather than a
sign-extended value.
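
For illustration only (not part of the patch): a minimal standalone C++
sketch, assuming two's-complement narrowing, that contrasts the i8
semantics the IR requires with what the folded `cmp w8, w9, sxtb #3`
form computed. All values and variable names below are hypothetical.

  #include <cstdint>
  #include <cstdio>

  int main() {
    int8_t op2  = 31;                    // 0x1F
    int8_t tmp2 = -8;                    // 0xF8, i.e. (31 << 3) truncated to i8

    // IR semantics: the shift wraps at 8 bits before any extension.
    int8_t op3 = (int8_t)(op2 << 3);                     // 0xF8 == -8
    bool ir_result = ((int32_t)tmp2 == (int32_t)op3);    // true

    // Folded form: sign-extend first, then shift at 32-bit width, so the
    // bits shifted out of the low byte are kept instead of discarded.
    int32_t folded_rhs = ((int32_t)op2) << 3;            // 248, not -8
    bool folded_result = ((int32_t)tmp2 == folded_rhs);  // false

    printf("ir=%d folded=%d\n", ir_result, folded_result);
    return 0;
  }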

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64FastISel.cpp
    llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 9b8162ce8dd4d0..1ea63a5d6ec08d 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -1231,15 +1231,6 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
   // Only extend the RHS within the instruction if there is a valid extend type.
   if (ExtendType != AArch64_AM::InvalidShiftExtend && RHS->hasOneUse() &&
       isValueAvailable(RHS)) {
-    if (const auto *SI = dyn_cast<BinaryOperator>(RHS))
-      if (const auto *C = dyn_cast<ConstantInt>(SI->getOperand(1)))
-        if ((SI->getOpcode() == Instruction::Shl) && (C->getZExtValue() < 4)) {
-          Register RHSReg = getRegForValue(SI->getOperand(0));
-          if (!RHSReg)
-            return 0;
-          return emitAddSub_rx(UseAdd, RetVT, LHSReg, RHSReg, ExtendType,
-                               C->getZExtValue(), SetFlags, WantResult);
-        }
     Register RHSReg = getRegForValue(RHS);
     if (!RHSReg)
       return 0;

diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll
index f853c0802cb1be..ac08825c237629 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --function icmp_i8_shift_and_cmp --version 4
 ; RUN: llc -O0 -fast-isel -fast-isel-abort=1 -verify-machineinstrs -mtriple=arm64-apple-darwin < %s | FileCheck %s
 
 define i32 @icmp_eq_imm(i32 %a) nounwind ssp {
@@ -257,3 +258,18 @@ entry:
   %conv2 = zext i1 %cmp to i32
   ret i32 %conv2
 }
+
+define i32 @icmp_i8_shift_and_cmp(i8 %a, i8 %b) {
+entry:
+; CHECK-LABEL: icmp_i8_shift_and_cmp:
+; CHECK:       ubfiz [[REG1:w[0-9]+]], w0, #3, #5
+; CHECK-NEXT:  sxtb [[REG0:w[0-9]+]], w1
+; CHECK-NEXT:  cmp [[REG0]], [[REG1]], sxtb
+; CHECK-NEXT:  cset [[REG:w[0-9]+]], eq
+; CHECK-NEXT:  and w0, [[REG]], #0x1
+  %op = shl i8 %a, 3
+  %cmp = icmp eq i8 %b, %op
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+

