[llvm] r214788 - [FastISel][AArch64] Fix shift lowering for i8 and i16 value types.

Juergen Ributzka juergen at apple.com
Mon Aug 4 14:49:52 PDT 2014


Author: ributzka
Date: Mon Aug  4 16:49:51 2014
New Revision: 214788

URL: http://llvm.org/viewvc/llvm-project?rev=214788&view=rev
Log:
[FastISel][AArch64] Fix shift lowering for i8 and i16 value types.

This fix changes the parameters #r and #s that are passed to the UBFM/SBFM
instructions so that the zero/sign-extension comes for free.

The original problem was that the left shift used the 32-bit shift even for
i8/i16 value types, which could leave "garbage" values in the upper bits.

The arithmetic shift right, on the other hand, used the wrong MSB as the sign
bit when deciding which bits to shift into the value.

This fixes <rdar://problem/17907720>.
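
For illustration, here is a minimal stand-alone C++ sketch (not LLVM code;
the helper names and test values are made up) that models the 32-bit
UBFM/SBFM semantics and shows why these immediates make the extension free:
for i8, ImmR = -Shift % 32 and ImmS = 7 - Shift turn the left shift into a
ubfiz that keeps only the bits that stay inside the byte, and ImmS = 7 makes
the arithmetic shift right take its sign from bit 7 instead of bit 31.

    #include <cassert>
    #include <cstdint>

    // Model of the 32-bit UBFM: if imms >= immr it extracts a bitfield (the
    // "ubfx" alias), otherwise it places the low imms+1 bits of wn at bit
    // position 32-immr and zeroes everything else (the "ubfiz" alias).
    static uint32_t ubfm32(uint32_t wn, unsigned immr, unsigned imms) {
      if (imms >= immr)
        return (wn >> immr) & ((1u << (imms - immr + 1)) - 1);
      unsigned lsb = (32 - immr) % 32;
      return (wn & ((1u << (imms + 1)) - 1)) << lsb;
    }

    // Model of the 32-bit SBFM for the extract case ("sbfx"): same field as
    // ubfx, but sign-extended from the top bit of the field.
    static int32_t sbfm32(uint32_t wn, unsigned immr, unsigned imms) {
      unsigned width = imms - immr + 1;
      uint32_t field = (wn >> immr) & ((1u << width) - 1);
      return (int32_t)(field << (32 - width)) >> (32 - width);
    }

    int main() {
      unsigned Shift = 4;
      uint8_t a = 0xf3;                      // -13 as an i8

      // i8 LSL: ImmR = -Shift % 32 = 28, ImmS = 7 - Shift = 3 -> ubfiz #4, #4
      uint32_t lsl = ubfm32(a, (32 - Shift) % 32, 7 - Shift);
      assert(lsl == (uint8_t)(a << Shift));  // 0x30; upper 24 bits are zero.

      // i8 ASR: ImmR = Shift = 4, ImmS = 7 -> sbfx #4, #4
      int32_t asr = sbfm32(a, Shift, 7);
      assert(asr == (int8_t)a >> Shift);     // -1; sign taken from bit 7.
      return 0;
    }

Both asserts should hold when built with any C++11 compiler; they mirror the
i8 cases exercised by the updated test below.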

Modified:
    llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp
    llvm/trunk/test/CodeGen/AArch64/fast-isel-shift.ll

Modified: llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp?rev=214788&r1=214787&r2=214788&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp Mon Aug  4 16:49:51 2014
@@ -2202,14 +2202,16 @@ unsigned AArch64FastISel::Emit_LSL_ri(MV
   switch (RetVT.SimpleTy) {
   default: return 0;
   case MVT::i8:
+    Opc = AArch64::UBFMWri; ImmR = -Shift % 32; ImmS =  7 - Shift; break;
   case MVT::i16:
+    Opc = AArch64::UBFMWri; ImmR = -Shift % 32; ImmS = 15 - Shift; break;
   case MVT::i32:
-    RetVT = MVT::i32;
     Opc = AArch64::UBFMWri; ImmR = -Shift % 32; ImmS = 31 - Shift; break;
   case MVT::i64:
     Opc = AArch64::UBFMXri; ImmR = -Shift % 64; ImmS = 63 - Shift; break;
   }
 
+  RetVT.SimpleTy = std::max(MVT::i32, RetVT.SimpleTy);
   return FastEmitInst_rii(Opc, TLI.getRegClassFor(RetVT), Op0, Op0IsKill, ImmR,
                           ImmS);
 }
@@ -2219,15 +2221,13 @@ unsigned AArch64FastISel::Emit_LSR_ri(MV
   unsigned Opc, ImmS;
   switch (RetVT.SimpleTy) {
   default: return 0;
-  case MVT::i8:
-  case MVT::i16:
-  case MVT::i32:
-    RetVT = MVT::i32;
-    Opc = AArch64::UBFMWri; ImmS = 31; break;
-  case MVT::i64:
-    Opc = AArch64::UBFMXri; ImmS = 63; break;
+  case MVT::i8:  Opc = AArch64::UBFMWri; ImmS =  7; break;
+  case MVT::i16: Opc = AArch64::UBFMWri; ImmS = 15; break;
+  case MVT::i32: Opc = AArch64::UBFMWri; ImmS = 31; break;
+  case MVT::i64: Opc = AArch64::UBFMXri; ImmS = 63; break;
   }
 
+  RetVT.SimpleTy = std::max(MVT::i32, RetVT.SimpleTy);
   return FastEmitInst_rii(Opc, TLI.getRegClassFor(RetVT), Op0, Op0IsKill, Shift,
                           ImmS);
 }
@@ -2237,15 +2237,13 @@ unsigned AArch64FastISel::Emit_ASR_ri(MV
   unsigned Opc, ImmS;
   switch (RetVT.SimpleTy) {
   default: return 0;
-  case MVT::i8:
-  case MVT::i16:
-  case MVT::i32:
-    RetVT = MVT::i32;
-    Opc = AArch64::SBFMWri; ImmS = 31; break;
-  case MVT::i64:
-    Opc = AArch64::SBFMXri; ImmS = 63; break;
+  case MVT::i8:  Opc = AArch64::SBFMWri; ImmS =  7; break;
+  case MVT::i16: Opc = AArch64::SBFMWri; ImmS = 15; break;
+  case MVT::i32: Opc = AArch64::SBFMWri; ImmS = 31; break;
+  case MVT::i64: Opc = AArch64::SBFMXri; ImmS = 63; break;
   }
 
+  RetVT.SimpleTy = std::max(MVT::i32, RetVT.SimpleTy);
   return FastEmitInst_rii(Opc, TLI.getRegClassFor(RetVT), Op0, Op0IsKill, Shift,
                           ImmS);
 }
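
To see how the new immediates map onto the aliases checked in the updated
test below, take the i8 cases with Shift = 4 (ImmR = -Shift % 32 wraps to 28
for an unsigned Shift):

    LSL: ImmR = 28, ImmS = 7 - 4 = 3
         UBFM w, w, #28, #3  ==  ubfiz w, w, #4, #4   (lsb = 32-28, width = 3+1)
    LSR: ImmR = 4, ImmS = 7
         UBFM w, w, #4, #7   ==  ubfx  w, w, #4, #4   (lsb = 4, width = 7-4+1)
    ASR: ImmR = 4, ImmS = 7
         SBFM w, w, #4, #7   ==  sbfx  w, w, #4, #4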

Modified: llvm/trunk/test/CodeGen/AArch64/fast-isel-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fast-isel-shift.ll?rev=214788&r1=214787&r2=214788&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fast-isel-shift.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fast-isel-shift.ll Mon Aug  4 16:49:51 2014
@@ -1,14 +1,14 @@
 ; RUN: llc -fast-isel -fast-isel-abort -mtriple=arm64-apple-darwin < %s | FileCheck %s
 
 ; CHECK-LABEL: lsl_i8
-; CHECK: lsl {{w[0-9]*}}, {{w[0-9]*}}, #4
+; CHECK: ubfiz {{w[0-9]*}}, {{w[0-9]*}}, #4, #4
 define zeroext i8 @lsl_i8(i8 %a) {
   %1 = shl i8 %a, 4
   ret i8 %1
 }
 
 ; CHECK-LABEL: lsl_i16
-; CHECK: lsl {{w[0-9]*}}, {{w[0-9]*}}, #8
+; CHECK: ubfiz {{w[0-9]*}}, {{w[0-9]*}}, #8, #8
 define zeroext i16 @lsl_i16(i16 %a) {
   %1 = shl i16 %a, 8
   ret i16 %1
@@ -30,14 +30,14 @@ define i64 @lsl_i64(i64 %a) {
 }
 
 ; CHECK-LABEL: lsr_i8
-; CHECK: lsr {{w[0-9]*}}, {{w[0-9]*}}, #4
+; CHECK: ubfx {{w[0-9]*}}, {{w[0-9]*}}, #4, #4
 define zeroext i8 @lsr_i8(i8 %a) {
   %1 = lshr i8 %a, 4
   ret i8 %1
 }
 
 ; CHECK-LABEL: lsr_i16
-; CHECK: lsr {{w[0-9]*}}, {{w[0-9]*}}, #8
+; CHECK: ubfx {{w[0-9]*}}, {{w[0-9]*}}, #8, #8
 define zeroext i16 @lsr_i16(i16 %a) {
   %1 = lshr i16 %a, 8
   ret i16 %1
@@ -59,14 +59,14 @@ define i64 @lsr_i64(i64 %a) {
 }
 
 ; CHECK-LABEL: asr_i8
-; CHECK: asr {{w[0-9]*}}, {{w[0-9]*}}, #4
+; CHECK: sbfx {{w[0-9]*}}, {{w[0-9]*}}, #4, #4
 define zeroext i8 @asr_i8(i8 %a) {
   %1 = ashr i8 %a, 4
   ret i8 %1
 }
 
 ; CHECK-LABEL: asr_i16
-; CHECK: asr {{w[0-9]*}}, {{w[0-9]*}}, #8
+; CHECK: sbfx {{w[0-9]*}}, {{w[0-9]*}}, #8, #8
 define zeroext i16 @asr_i16(i16 %a) {
   %1 = ashr i16 %a, 8
   ret i16 %1
@@ -87,3 +87,13 @@ define i64 @asr_i64(i64 %a) {
   ret i64 %1
 }
 
+; CHECK-LABEL: shift_test1
+; CHECK:       ubfiz {{w[0-9]*}}, {{w[0-9]*}}, #4, #4
+; CHECK-NEXT:  sbfx  {{w[0-9]*}}, {{w[0-9]*}}, #4, #4
+define i32 @shift_test1(i8 %a) {
+  %1 = shl i8 %a, 4
+  %2 = ashr i8 %1, 4
+  %3 = sext i8 %2 to i32
+  ret i32 %3
+}
+
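
A concrete value, purely for illustration, shows what shift_test1 guards
against: with %a = 15 (0x0f), "shl i8 15, 4" gives -16 (0xf0) and
"ashr i8 -16, 4" gives -1, so the function should return -1. A plain 32-bit
lsl/asr pair would instead compute 240 >> 4 = 15, because the arithmetic
shift would take its sign from bit 31 of the wide register rather than from
bit 7 of the i8 value (assuming the following sext just sign-extends the low
byte, which would still be 0x0f).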

More information about the llvm-commits mailing list