[llvm] r340405 - [ARM] Rotated operand patterns for *xtb16

Sam Parker via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 22 05:58:36 PDT 2018


Author: sam_parker
Date: Wed Aug 22 05:58:36 2018
New Revision: 340405

URL: http://llvm.org/viewvc/llvm-project?rev=340405&view=rev
Log:
[ARM] Rotated operand patterns for *xtb16

Add intrinsic isel patterns for sxtb16, sxtab16, uxtb16 and uxtab16
so that a rotation (ror) of the operand being extended is folded into
the instruction's rotate field instead of being emitted as a separate
instruction.
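
For illustration, a minimal C-level sketch of the kind of source this
now matches, using the ACLE intrinsics from arm_acle.h (a sketch
assuming a target with the DSP extension, i.e. __ARM_FEATURE_SIMD32;
int8x4_t and int16x2_t are the arm_acle.h typedefs, both plain 32-bit
integers underneath):

  #include <arm_acle.h>

  /* __ror(a, 8) lowers to the lshr/shl/or rotate idiom in IR, which
     isel now folds into the rotate field, giving
         sxtb16 r0, r0, ror #8
     rather than a separate rotate followed by sxtb16. */
  int16x2_t sxtb16_ror_8(int8x4_t a) {
    return __sxtb16(__ror(a, 8));
  }

The same folding applies to the accumulating forms (sxtab16, uxtab16),
where the rotation is on the second (extended) operand.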

Differential Revision: https://reviews.llvm.org/D51034

Added:
    llvm/trunk/test/CodeGen/ARM/acle-intrinsics-rot.ll
Modified:
    llvm/trunk/lib/Target/ARM/ARMInstrInfo.td
    llvm/trunk/lib/Target/ARM/ARMInstrThumb2.td

Modified: llvm/trunk/lib/Target/ARM/ARMInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrInfo.td?rev=340405&r1=340404&r2=340405&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrInfo.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMInstrInfo.td Wed Aug 22 05:58:36 2018
@@ -3521,10 +3521,14 @@ def : ARMV6Pat<(add rGPR:$Rn, (sext_inre
 def SXTB16  : AI_ext_rrot_np<0b01101000, "sxtb16">;
 def : ARMV6Pat<(int_arm_sxtb16 GPR:$Src),
                (SXTB16 GPR:$Src, 0)>;
+def : ARMV6Pat<(int_arm_sxtb16 (rotr GPR:$Src, rot_imm:$rot)),
+               (SXTB16 GPR:$Src, rot_imm:$rot)>;
 
 def SXTAB16 : AI_exta_rrot_np<0b01101000, "sxtab16">;
 def : ARMV6Pat<(int_arm_sxtab16 GPR:$LHS, GPR:$RHS),
                (SXTAB16 GPR:$LHS, GPR:$RHS, 0)>;
+def : ARMV6Pat<(int_arm_sxtab16 GPR:$LHS, (rotr GPR:$RHS, rot_imm:$rot)),
+               (SXTAB16 GPR:$LHS, GPR:$RHS, rot_imm:$rot)>;
 
 // Zero extenders
 
@@ -3546,6 +3550,8 @@ def : ARMV6Pat<(and (srl GPR:$Src, (i32
                (UXTB16 GPR:$Src, 1)>;
 def : ARMV6Pat<(int_arm_uxtb16 GPR:$Src),
                (UXTB16 GPR:$Src, 0)>;
+def : ARMV6Pat<(int_arm_uxtb16 (rotr GPR:$Src, rot_imm:$rot)),
+               (UXTB16 GPR:$Src, rot_imm:$rot)>;
 
 def UXTAB : AI_exta_rrot<0b01101110, "uxtab",
                         BinOpFrag<(add node:$LHS, (and node:$RHS, 0x00FF))>>;
@@ -3562,6 +3568,8 @@ def : ARMV6Pat<(add rGPR:$Rn, (and (srl
 def UXTAB16 : AI_exta_rrot_np<0b01101100, "uxtab16">;
 def : ARMV6Pat<(int_arm_uxtab16 GPR:$LHS, GPR:$RHS),
                (UXTAB16 GPR:$LHS, GPR:$RHS, 0)>;
+def : ARMV6Pat<(int_arm_uxtab16 GPR:$LHS, (rotr GPR:$RHS, rot_imm:$rot)),
+               (UXTAB16 GPR:$LHS, GPR:$RHS, rot_imm:$rot)>;
 
 
 def SBFX  : I<(outs GPRnopc:$Rd),

Modified: llvm/trunk/lib/Target/ARM/ARMInstrThumb2.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrThumb2.td?rev=340405&r1=340404&r2=340405&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrThumb2.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMInstrThumb2.td Wed Aug 22 05:58:36 2018
@@ -1997,6 +1997,10 @@ def : Thumb2DSPPat<(int_arm_sxtb16 rGPR:
                    (t2SXTB16 rGPR:$Rn, 0)>;
 def : Thumb2DSPPat<(int_arm_sxtab16 rGPR:$Rn, rGPR:$Rm),
                    (t2SXTAB16 rGPR:$Rn, rGPR:$Rm, 0)>;
+def : Thumb2DSPPat<(int_arm_sxtb16 (rotr rGPR:$Rn, rot_imm:$rot)),
+                   (t2SXTB16 rGPR:$Rn, rot_imm:$rot)>;
+def : Thumb2DSPPat<(int_arm_sxtab16 rGPR:$Rn, (rotr rGPR:$Rm, rot_imm:$rot)),
+                   (t2SXTAB16 rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
 
 
 // A simple right-shift can also be used in most cases (the exception is the
@@ -2032,6 +2036,8 @@ def : Thumb2DSPPat<(and (rotr rGPR:$Rm,
 
 def : Thumb2DSPPat<(int_arm_uxtb16 rGPR:$Rm),
                    (t2UXTB16 rGPR:$Rm, 0)>;
+def : Thumb2DSPPat<(int_arm_uxtb16 (rotr rGPR:$Rn, rot_imm:$rot)),
+                   (t2UXTB16 rGPR:$Rn, rot_imm:$rot)>;
 
 // FIXME: This pattern incorrectly assumes the shl operator is a rotate.
 //        The transformation should probably be done as a combiner action
@@ -2062,6 +2068,8 @@ def : Thumb2DSPPat<(add rGPR:$Rn, (and (
                        (t2UXTAH rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
 def : Thumb2DSPPat<(int_arm_uxtab16 rGPR:$Rn, rGPR:$Rm),
                       (t2UXTAB16 rGPR:$Rn, rGPR:$Rm, 0)>;
+def : Thumb2DSPPat<(int_arm_uxtab16 rGPR:$Rn, (rotr rGPR:$Rm, rot_imm:$rot)),
+                   (t2UXTAB16 rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
 }
 
 

Added: llvm/trunk/test/CodeGen/ARM/acle-intrinsics-rot.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/acle-intrinsics-rot.ll?rev=340405&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/acle-intrinsics-rot.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/acle-intrinsics-rot.ll Wed Aug 22 05:58:36 2018
@@ -0,0 +1,143 @@
+; RUN: llc -mtriple=thumbv8m.main -mcpu=cortex-m33 %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv7em %s -o - | FileCheck %s
+; RUN: llc -mtriple=armv6 %s -o - | FileCheck %s
+; RUN: llc -mtriple=armv7 %s -o - | FileCheck %s
+; RUN: llc -mtriple=armv8 %s -o - | FileCheck %s
+
+; CHECK-LABEL: sxtb16_ror_8
+; CHECK: sxtb16 r0, r0, ror #8
+define i32 @sxtb16_ror_8(i32 %a) {
+entry:
+  %shr.i = lshr i32 %a, 8
+  %shl.i = shl i32 %a, 24
+  %or.i = or i32 %shl.i, %shr.i
+  %0 = tail call i32 @llvm.arm.sxtb16(i32 %or.i)
+  ret i32 %0
+}
+
+; CHECK-LABEL: sxtb16_ror_16
+; CHECK: sxtb16 r0, r0, ror #16
+define i32 @sxtb16_ror_16(i32 %a) {
+entry:
+  %shr.i = lshr i32 %a, 16
+  %shl.i = shl i32 %a, 16
+  %or.i = or i32 %shl.i, %shr.i
+  %0 = tail call i32 @llvm.arm.sxtb16(i32 %or.i)
+  ret i32 %0
+}
+
+; CHECK-LABEL: sxtb16_ror_24
+; CHECK: sxtb16 r0, r0, ror #24
+define i32 @sxtb16_ror_24(i32 %a) {
+entry:
+  %shr.i = lshr i32 %a, 24
+  %shl.i = shl i32 %a, 8
+  %or.i = or i32 %shl.i, %shr.i
+  %0 = tail call i32 @llvm.arm.sxtb16(i32 %or.i)
+  ret i32 %0
+}
+
+; CHECK-LABEL: uxtb16_ror_8
+; CHECK: uxtb16 r0, r0, ror #8
+define i32 @uxtb16_ror_8(i32 %a) {
+entry:
+  %shr.i = lshr i32 %a, 8
+  %shl.i = shl i32 %a, 24
+  %or.i = or i32 %shl.i, %shr.i
+  %0 = tail call i32 @llvm.arm.uxtb16(i32 %or.i)
+  ret i32 %0
+}
+
+; CHECK-LABEL: uxtb16_ror_16
+; CHECK: uxtb16 r0, r0, ror #16
+define i32 @uxtb16_ror_16(i32 %a) {
+entry:
+  %shr.i = lshr i32 %a, 16
+  %shl.i = shl i32 %a, 16
+  %or.i = or i32 %shl.i, %shr.i
+  %0 = tail call i32 @llvm.arm.uxtb16(i32 %or.i)
+  ret i32 %0
+}
+
+; CHECK-LABEL: uxtb16_ror_24
+; CHECK: uxtb16 r0, r0, ror #24
+define i32 @uxtb16_ror_24(i32 %a) {
+entry:
+  %shr.i = lshr i32 %a, 24
+  %shl.i = shl i32 %a, 8
+  %or.i = or i32 %shl.i, %shr.i
+  %0 = tail call i32 @llvm.arm.uxtb16(i32 %or.i)
+  ret i32 %0
+}
+
+; CHECK-LABEL: sxtab16_ror_8
+; CHECK: sxtab16 r0, r0, r1, ror #8
+define i32 @sxtab16_ror_8(i32 %a, i32 %b) {
+entry:
+  %shr.i = lshr i32 %b, 8
+  %shl.i = shl i32 %b, 24
+  %or.i = or i32 %shl.i, %shr.i
+  %0 = tail call i32 @llvm.arm.sxtab16(i32 %a, i32 %or.i)
+  ret i32 %0
+}
+
+; CHECK-LABEL: sxtab16_ror_16
+; CHECK: sxtab16 r0, r0, r1, ror #16
+define i32 @sxtab16_ror_16(i32 %a, i32 %b) {
+entry:
+  %shr.i = lshr i32 %b, 16
+  %shl.i = shl i32 %b, 16
+  %or.i = or i32 %shl.i, %shr.i
+  %0 = tail call i32 @llvm.arm.sxtab16(i32 %a, i32 %or.i)
+  ret i32 %0
+}
+
+; CHECK-LABEL: sxtab16_ror_24
+; CHECK: sxtab16 r0, r0, r1, ror #24
+define i32 @sxtab16_ror_24(i32 %a, i32 %b) {
+entry:
+  %shr.i = lshr i32 %b, 24
+  %shl.i = shl i32 %b, 8
+  %or.i = or i32 %shl.i, %shr.i
+  %0 = tail call i32 @llvm.arm.sxtab16(i32 %a, i32 %or.i)
+  ret i32 %0
+}
+
+; CHECK-LABEL: uxtab16_ror_8
+; CHECK: uxtab16 r0, r0, r1, ror #8
+define i32 @uxtab16_ror_8(i32 %a, i32 %b) {
+entry:
+  %shr.i = lshr i32 %b, 8
+  %shl.i = shl i32 %b, 24
+  %or.i = or i32 %shl.i, %shr.i
+  %0 = tail call i32 @llvm.arm.uxtab16(i32 %a, i32 %or.i)
+  ret i32 %0
+}
+
+; CHECK-LABEL: uxtab16_ror_16
+; CHECK: uxtab16 r0, r0, r1, ror #16
+define i32 @uxtab16_ror_16(i32 %a, i32 %b) {
+entry:
+  %shr.i = lshr i32 %b, 16
+  %shl.i = shl i32 %b, 16
+  %or.i = or i32 %shl.i, %shr.i
+  %0 = tail call i32 @llvm.arm.uxtab16(i32 %a, i32 %or.i)
+  ret i32 %0
+}
+
+; CHECK-LABEL: uxtab16_ror_24
+; CHECK: uxtab16 r0, r0, r1, ror #24
+define i32 @uxtab16_ror_24(i32 %a, i32 %b) {
+entry:
+  %shr.i = lshr i32 %b, 24
+  %shl.i = shl i32 %b, 8
+  %or.i = or i32 %shl.i, %shr.i
+  %0 = tail call i32 @llvm.arm.uxtab16(i32 %a, i32 %or.i)
+  ret i32 %0
+}
+
+declare i32 @llvm.arm.sxtb16(i32)
+declare i32 @llvm.arm.uxtb16(i32)
+declare i32 @llvm.arm.sxtab16(i32, i32)
+declare i32 @llvm.arm.uxtab16(i32, i32)
+

More information about the llvm-commits mailing list