[llvm] 9681dc9 - [PowerPC] Exploit `vrl(b|h|w|d)` to perform vector rotation

Kai Luo via llvm-commits llvm-commits at lists.llvm.org
Sun Dec 22 19:07:01 PST 2019


Author: Kai Luo
Date: 2019-12-23T03:04:43Z
New Revision: 9681dc9627b1ea50fd90cdea84290ddc021d3fca

URL: https://github.com/llvm/llvm-project/commit/9681dc9627b1ea50fd90cdea84290ddc021d3fca
DIFF: https://github.com/llvm/llvm-project/commit/9681dc9627b1ea50fd90cdea84290ddc021d3fca.diff

LOG: [PowerPC] Exploit `vrl(b|h|w|d)` to perform vector rotation

Summary:
Currently, we set the legalization action of `ISD::ROTL` on vector
types to `Expand` in `PPCISelLowering`. However, we can exploit the
`vrl(b|h|w|d)` instructions to lower `ISD::ROTL` directly.
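
For example, a splat rotate-left expressed through the funnel-shift
intrinsic, as in the updated funnel-shift-rot.ll test below,

  declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)

  define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) {
    %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
    ret <4 x i32> %f
  }

now compiles to a single `vrlw 2, 2, 3` instead of the previous
xxlxor/vslw/vsubuwm/vsrw/xxlor expansion.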

Differential Revision: https://reviews.llvm.org/D71324

Added: 
    llvm/test/CodeGen/PowerPC/vector-rotates.ll

Modified: 
    llvm/lib/Target/PowerPC/PPCISelLowering.cpp
    llvm/lib/Target/PowerPC/PPCInstrAltivec.td
    llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index ad7c9ea4e6ca..32a0957f012a 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -714,6 +714,14 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
     if (!Subtarget.hasP8Altivec())
       setOperationAction(ISD::ABS, MVT::v2i64, Expand);
 
+    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
+    if (Subtarget.hasAltivec())
+      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
+        setOperationAction(ISD::ROTL, VT, Legal);
+    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
+    if (Subtarget.hasP8Altivec())
+      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);
+
     addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
     addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
     addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);

diff --git a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
index f99471b8de3e..21a432e7a539 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -861,6 +861,14 @@ def V_SETALLONES  : VXForm_3<908, (outs vrrc:$vD), (ins),
 def : InstAlias<"vmr $vD, $vA", (VOR vrrc:$vD, vrrc:$vA, vrrc:$vA)>;
 def : InstAlias<"vnot $vD, $vA", (VNOR vrrc:$vD, vrrc:$vA, vrrc:$vA)>;
 
+// Rotates.
+def : Pat<(v16i8 (rotl v16i8:$vA, v16i8:$vB)),
+          (v16i8 (VRLB v16i8:$vA, v16i8:$vB))>;
+def : Pat<(v8i16 (rotl v8i16:$vA, v8i16:$vB)),
+          (v8i16 (VRLH v8i16:$vA, v8i16:$vB))>;
+def : Pat<(v4i32 (rotl v4i32:$vA, v4i32:$vB)),
+          (v4i32 (VRLW v4i32:$vA, v4i32:$vB))>;
+
 // Loads.
 def : Pat<(v4i32 (load xoaddr:$src)), (LVX xoaddr:$src)>;
 
@@ -1159,9 +1167,13 @@ def:Pat<(vmrgew_swapped_shuffle v16i8:$vA, v16i8:$vB),
 def:Pat<(vmrgow_swapped_shuffle v16i8:$vA, v16i8:$vB),
         (VMRGOW $vB, $vA)>;
 
+// Vector rotates.
+def VRLD : VX1_Int_Ty<196, "vrld", int_ppc_altivec_vrld, v2i64>;
+
+def : Pat<(v2i64 (rotl v2i64:$vA, v2i64:$vB)),
+          (v2i64 (VRLD v2i64:$vA, v2i64:$vB))>;
 
 // Vector shifts
-def VRLD : VX1_Int_Ty<196, "vrld", int_ppc_altivec_vrld, v2i64>;
 def VSLD : VXForm_1<1476, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                     "vsld $vD, $vA, $vB", IIC_VecGeneral, []>;
 def VSRD : VXForm_1<1732, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),

diff --git a/llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll b/llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll
index fd5f51c9e9f2..fbbd01faa012 100644
--- a/llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll
+++ b/llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll
@@ -75,11 +75,7 @@ define i64 @rotl_i64(i64 %x, i64 %z) {
 define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) {
 ; CHECK-LABEL: rotl_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xxlxor 36, 36, 36
-; CHECK-NEXT:    vslw 5, 2, 3
-; CHECK-NEXT:    vsubuwm 3, 4, 3
-; CHECK-NEXT:    vsrw 2, 2, 3
-; CHECK-NEXT:    xxlor 34, 37, 34
+; CHECK-NEXT:    vrlw 2, 2, 3
 ; CHECK-NEXT:    blr
   %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
   ret <4 x i32> %f
@@ -90,13 +86,8 @@ define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) {
 define <4 x i32> @rotl_v4i32_const_shift(<4 x i32> %x) {
 ; CHECK-LABEL: rotl_v4i32_const_shift:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vspltisw 3, -16
-; CHECK-NEXT:    vspltisw 4, 13
-; CHECK-NEXT:    vspltisw 5, 3
-; CHECK-NEXT:    vsubuwm 3, 4, 3
-; CHECK-NEXT:    vslw 4, 2, 5
-; CHECK-NEXT:    vsrw 2, 2, 3
-; CHECK-NEXT:    xxlor 34, 36, 34
+; CHECK-NEXT:    vspltisw 3, 3
+; CHECK-NEXT:    vrlw 2, 2, 3
 ; CHECK-NEXT:    blr
   %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
   ret <4 x i32> %f
@@ -167,10 +158,8 @@ define <4 x i32> @rotr_v4i32(<4 x i32> %x, <4 x i32> %z) {
 ; CHECK-LABEL: rotr_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xxlxor 36, 36, 36
-; CHECK-NEXT:    vsrw 5, 2, 3
 ; CHECK-NEXT:    vsubuwm 3, 4, 3
-; CHECK-NEXT:    vslw 2, 2, 3
-; CHECK-NEXT:    xxlor 34, 34, 37
+; CHECK-NEXT:    vrlw 2, 2, 3
 ; CHECK-NEXT:    blr
   %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
   ret <4 x i32> %f
@@ -183,11 +172,8 @@ define <4 x i32> @rotr_v4i32_const_shift(<4 x i32> %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vspltisw 3, -16
 ; CHECK-NEXT:    vspltisw 4, 13
-; CHECK-NEXT:    vspltisw 5, 3
 ; CHECK-NEXT:    vsubuwm 3, 4, 3
-; CHECK-NEXT:    vsrw 4, 2, 5
-; CHECK-NEXT:    vslw 2, 2, 3
-; CHECK-NEXT:    xxlor 34, 34, 36
+; CHECK-NEXT:    vrlw 2, 2, 3
 ; CHECK-NEXT:    blr
   %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
   ret <4 x i32> %f

diff --git a/llvm/test/CodeGen/PowerPC/vector-rotates.ll b/llvm/test/CodeGen/PowerPC/vector-rotates.ll
new file mode 100644
index 000000000000..d5fc48173d45
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/vector-rotates.ll
@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O3 -mtriple=powerpc64le-unknown-unknown -ppc-asm-full-reg-names \
+; RUN:   -verify-machineinstrs -mcpu=pwr8 < %s | \
+; RUN:   FileCheck --check-prefix=CHECK-P8 %s
+; RUN: llc -O3 -mtriple=powerpc64-unknown-unknown -ppc-asm-full-reg-names \
+; RUN:   -verify-machineinstrs -mcpu=pwr7 < %s | \
+; RUN:   FileCheck --check-prefix=CHECK-P7 %s
+
+define <16 x i8> @rotl_v16i8(<16 x i8> %a) {
+; CHECK-P8-LABEL: rotl_v16i8:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI0_0@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    vrlb v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v16i8:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-P7-NEXT:    addi r3, r3, .LCPI0_0@toc@l
+; CHECK-P7-NEXT:    lxvw4x vs35, 0, r3
+; CHECK-P7-NEXT:    vrlb v2, v2, v3
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <16 x i8> %a, <i8 1, i8 1, i8 2, i8 2, i8 3, i8 3, i8 4, i8 4, i8 5, i8 5, i8 6, i8 6, i8 7, i8 7, i8 8, i8 8>
+  %c = lshr <16 x i8> %a, <i8 7, i8 7, i8 6, i8 6, i8 5, i8 5, i8 4, i8 4, i8 3, i8 3, i8 2, i8 2, i8 1, i8 1, i8 0, i8 0>
+  %d = or <16 x i8> %b, %c
+  ret <16 x i8> %d
+}
+
+define <8 x i16> @rotl_v8i16(<8 x i16> %a) {
+; CHECK-P8-LABEL: rotl_v8i16:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI1_0@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    vrlh v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v8i16:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-P7-NEXT:    addi r3, r3, .LCPI1_0@toc@l
+; CHECK-P7-NEXT:    lxvw4x vs35, 0, r3
+; CHECK-P7-NEXT:    vrlh v2, v2, v3
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <8 x i16> %a, <i16 1, i16 2, i16 3, i16 5, i16 7, i16 11, i16 13, i16 16>
+  %c = lshr <8 x i16> %a, <i16 15, i16 14, i16 13, i16 11, i16 9, i16 5, i16 3, i16 0>
+  %d = or <8 x i16> %b, %c
+  ret <8 x i16> %d
+}
+
+define <4 x i32> @rotl_v4i32_0(<4 x i32> %a) {
+; CHECK-P8-LABEL: rotl_v4i32_0:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI2_0@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI2_0@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    vrlw v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v4i32_0:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    addis r3, r2, .LCPI2_0@toc@ha
+; CHECK-P7-NEXT:    addi r3, r3, .LCPI2_0@toc@l
+; CHECK-P7-NEXT:    lxvw4x vs35, 0, r3
+; CHECK-P7-NEXT:    vrlw v2, v2, v3
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <4 x i32> %a, <i32 29, i32 19, i32 17, i32 11>
+  %c = lshr <4 x i32> %a, <i32 3, i32 13, i32 15, i32 21>
+  %d = or <4 x i32> %b, %c
+  ret <4 x i32> %d
+}
+
+define <4 x i32> @rotl_v4i32_1(<4 x i32> %a) {
+; CHECK-P8-LABEL: rotl_v4i32_1:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    vspltisw v3, -16
+; CHECK-P8-NEXT:    vspltisw v4, 7
+; CHECK-P8-NEXT:    vsubuwm v3, v4, v3
+; CHECK-P8-NEXT:    vrlw v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v4i32_1:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    vspltisw v3, -16
+; CHECK-P7-NEXT:    vspltisw v4, 7
+; CHECK-P7-NEXT:    vsubuwm v3, v4, v3
+; CHECK-P7-NEXT:    vrlw v2, v2, v3
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <4 x i32> %a, <i32 23, i32 23, i32 23, i32 23>
+  %c = lshr <4 x i32> %a, <i32 9, i32 9, i32 9, i32 9>
+  %d = or <4 x i32> %b, %c
+  ret <4 x i32> %d
+}
+
+define <2 x i64> @rotl_v2i64(<2 x i64> %a) {
+; CHECK-P8-LABEL: rotl_v2i64:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI4_0@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI4_0@toc@l
+; CHECK-P8-NEXT:    lxvd2x vs0, 0, r3
+; CHECK-P8-NEXT:    xxswapd vs35, vs0
+; CHECK-P8-NEXT:    vrld v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v2i64:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    addi r3, r1, -48
+; CHECK-P7-NEXT:    stxvd2x vs34, 0, r3
+; CHECK-P7-NEXT:    ld r3, -40(r1)
+; CHECK-P7-NEXT:    sldi r4, r3, 53
+; CHECK-P7-NEXT:    rldicl r3, r3, 53, 11
+; CHECK-P7-NEXT:    std r4, -8(r1)
+; CHECK-P7-NEXT:    ld r4, -48(r1)
+; CHECK-P7-NEXT:    sldi r5, r4, 41
+; CHECK-P7-NEXT:    rldicl r4, r4, 41, 23
+; CHECK-P7-NEXT:    std r5, -16(r1)
+; CHECK-P7-NEXT:    addi r5, r1, -16
+; CHECK-P7-NEXT:    lxvw4x vs0, 0, r5
+; CHECK-P7-NEXT:    std r3, -24(r1)
+; CHECK-P7-NEXT:    addi r3, r1, -32
+; CHECK-P7-NEXT:    std r4, -32(r1)
+; CHECK-P7-NEXT:    lxvw4x vs1, 0, r3
+; CHECK-P7-NEXT:    xxlor vs34, vs0, vs1
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <2 x i64> %a, <i64 41, i64 53>
+  %c = lshr <2 x i64> %a, <i64 23, i64 11>
+  %d = or <2 x i64> %b, %c
+  ret <2 x i64> %d
+}
