[llvm] 1b6cccb - [PowerPC][NFC] Testing ROTL of v1i128.

via llvm-commits llvm-commits@lists.llvm.org
Thu Jun 4 03:09:59 PDT 2020


Author: Esme-Yi
Date: 2020-06-04T10:09:06Z
New Revision: 1b6cccba3ec1ab97688c8e0c1da3f8a5a7fa4d17

URL: https://github.com/llvm/llvm-project/commit/1b6cccba3ec1ab97688c8e0c1da3f8a5a7fa4d17
DIFF: https://github.com/llvm/llvm-project/commit/1b6cccba3ec1ab97688c8e0c1da3f8a5a7fa4d17.diff

LOG: [PowerPC][NFC] Testing ROTL of v1i128.

Summary: A bug was reported in bugzilla-45628 where the swap_with_shift case cannot be matched to the single hardware instruction xxswapd as expected. The case in fact matches the rotate idiom, but PPC does not support ROTL for v1i128.
This is an NFC patch that adds tests for ROTL of v1i128 on master.
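
For context, here is a minimal C sketch of the source-level idiom behind the report; it is an illustration only, assuming Clang/GCC __int128 support. The two 64-bit shifts plus an OR form a rotate by 64 bits, which is exactly a doubleword swap and should therefore lower to a single xxswapd:

    /* Rotating a 128-bit value left by 64 bits swaps its two 64-bit
       halves; this mirrors the swap_with_shift case from bugzilla-45628. */
    typedef unsigned __int128 u128;

    u128 swap_with_shift(u128 x) {
      return (x << 64) | (x >> 64); /* rotl(x, 64) */
    }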

Reviewed By: steven.zhang

Differential Revision: https://reviews.llvm.org/D81073

Added: 
    llvm/test/CodeGen/PowerPC/pr45628.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/PowerPC/pr45628.ll b/llvm/test/CodeGen/PowerPC/pr45628.ll
new file mode 100644
index 000000000000..5b3b16a3d159
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/pr45628.ll
@@ -0,0 +1,385 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
+; RUN:   -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs < %s | FileCheck  %s \
+; RUN:   -check-prefix=CHECK-VSX
+; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
+; RUN:   -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs -mattr=-vsx < %s | FileCheck  %s \
+; RUN:   -check-prefix=CHECK-NOVSX
+
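+; rotl_64 is the swap_with_shift case from bugzilla-45628: a rotate by
+; 64 bits is a doubleword swap, so ideally it would be a single xxswapd.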
+define <1 x i128> @rotl_64(<1 x i128> %num) {
+; CHECK-VSX-LABEL: rotl_64:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI0_0@toc@l
+; CHECK-VSX-NEXT:    lxvx v3, 0, r3
+; CHECK-VSX-NEXT:    vslo v4, v2, v3
+; CHECK-VSX-NEXT:    vspltb v5, v3, 15
+; CHECK-VSX-NEXT:    vsro v2, v2, v3
+; CHECK-VSX-NEXT:    vsl v4, v4, v5
+; CHECK-VSX-NEXT:    vsr v2, v2, v5
+; CHECK-VSX-NEXT:    xxlor v2, v4, v2
+; CHECK-VSX-NEXT:    blr
+;
+; CHECK-NOVSX-LABEL: rotl_64:
+; CHECK-NOVSX:       # %bb.0: # %entry
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI0_0@toc@l
+; CHECK-NOVSX-NEXT:    lvx v3, 0, r3
+; CHECK-NOVSX-NEXT:    vslo v4, v2, v3
+; CHECK-NOVSX-NEXT:    vspltb v5, v3, 15
+; CHECK-NOVSX-NEXT:    vsro v2, v2, v3
+; CHECK-NOVSX-NEXT:    vsl v4, v4, v5
+; CHECK-NOVSX-NEXT:    vsr v2, v2, v5
+; CHECK-NOVSX-NEXT:    vor v2, v4, v2
+; CHECK-NOVSX-NEXT:    blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 64>
+  %shr = lshr <1 x i128> %num, <i128 64>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
+define <1 x i128> @rotl_32(<1 x i128> %num) {
+; CHECK-VSX-LABEL: rotl_32:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI1_0@toc@l
+; CHECK-VSX-NEXT:    lxvx v3, 0, r3
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI1_1@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI1_1@toc@l
+; CHECK-VSX-NEXT:    vslo v4, v2, v3
+; CHECK-VSX-NEXT:    vspltb v3, v3, 15
+; CHECK-VSX-NEXT:    vsl v3, v4, v3
+; CHECK-VSX-NEXT:    lxvx v4, 0, r3
+; CHECK-VSX-NEXT:    vsro v2, v2, v4
+; CHECK-VSX-NEXT:    vspltb v4, v4, 15
+; CHECK-VSX-NEXT:    vsr v2, v2, v4
+; CHECK-VSX-NEXT:    xxlor v2, v3, v2
+; CHECK-VSX-NEXT:    blr
+;
+; CHECK-NOVSX-LABEL: rotl_32:
+; CHECK-NOVSX:       # %bb.0: # %entry
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI1_0@toc@l
+; CHECK-NOVSX-NEXT:    lvx v3, 0, r3
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI1_1@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI1_1@toc@l
+; CHECK-NOVSX-NEXT:    vslo v4, v2, v3
+; CHECK-NOVSX-NEXT:    vspltb v3, v3, 15
+; CHECK-NOVSX-NEXT:    vsl v3, v4, v3
+; CHECK-NOVSX-NEXT:    lvx v4, 0, r3
+; CHECK-NOVSX-NEXT:    vsro v2, v2, v4
+; CHECK-NOVSX-NEXT:    vspltb v4, v4, 15
+; CHECK-NOVSX-NEXT:    vsr v2, v2, v4
+; CHECK-NOVSX-NEXT:    vor v2, v3, v2
+; CHECK-NOVSX-NEXT:    blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 32>
+  %shr = lshr <1 x i128> %num, <i128 96>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
+define <1 x i128> @rotl_96(<1 x i128> %num) {
+; CHECK-VSX-LABEL: rotl_96:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI2_0@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI2_0@toc@l
+; CHECK-VSX-NEXT:    lxvx v3, 0, r3
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI2_1@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI2_1@toc@l
+; CHECK-VSX-NEXT:    vslo v4, v2, v3
+; CHECK-VSX-NEXT:    vspltb v3, v3, 15
+; CHECK-VSX-NEXT:    vsl v3, v4, v3
+; CHECK-VSX-NEXT:    lxvx v4, 0, r3
+; CHECK-VSX-NEXT:    vsro v2, v2, v4
+; CHECK-VSX-NEXT:    vspltb v4, v4, 15
+; CHECK-VSX-NEXT:    vsr v2, v2, v4
+; CHECK-VSX-NEXT:    xxlor v2, v3, v2
+; CHECK-VSX-NEXT:    blr
+;
+; CHECK-NOVSX-LABEL: rotl_96:
+; CHECK-NOVSX:       # %bb.0: # %entry
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI2_0@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI2_0@toc@l
+; CHECK-NOVSX-NEXT:    lvx v3, 0, r3
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI2_1@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI2_1@toc@l
+; CHECK-NOVSX-NEXT:    vslo v4, v2, v3
+; CHECK-NOVSX-NEXT:    vspltb v3, v3, 15
+; CHECK-NOVSX-NEXT:    vsl v3, v4, v3
+; CHECK-NOVSX-NEXT:    lvx v4, 0, r3
+; CHECK-NOVSX-NEXT:    vsro v2, v2, v4
+; CHECK-NOVSX-NEXT:    vspltb v4, v4, 15
+; CHECK-NOVSX-NEXT:    vsr v2, v2, v4
+; CHECK-NOVSX-NEXT:    vor v2, v3, v2
+; CHECK-NOVSX-NEXT:    blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 96>
+  %shr = lshr <1 x i128> %num, <i128 32>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
+define <1 x i128> @rotl_16(<1 x i128> %num) {
+; CHECK-VSX-LABEL: rotl_16:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI3_0@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI3_0@toc@l
+; CHECK-VSX-NEXT:    lxvx v3, 0, r3
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI3_1@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI3_1@toc@l
+; CHECK-VSX-NEXT:    vslo v4, v2, v3
+; CHECK-VSX-NEXT:    vspltb v3, v3, 15
+; CHECK-VSX-NEXT:    vsl v3, v4, v3
+; CHECK-VSX-NEXT:    lxvx v4, 0, r3
+; CHECK-VSX-NEXT:    vsro v2, v2, v4
+; CHECK-VSX-NEXT:    vspltb v4, v4, 15
+; CHECK-VSX-NEXT:    vsr v2, v2, v4
+; CHECK-VSX-NEXT:    xxlor v2, v3, v2
+; CHECK-VSX-NEXT:    blr
+;
+; CHECK-NOVSX-LABEL: rotl_16:
+; CHECK-NOVSX:       # %bb.0: # %entry
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI3_0@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI3_0@toc@l
+; CHECK-NOVSX-NEXT:    lvx v3, 0, r3
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI3_1@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI3_1@toc@l
+; CHECK-NOVSX-NEXT:    vslo v4, v2, v3
+; CHECK-NOVSX-NEXT:    vspltb v3, v3, 15
+; CHECK-NOVSX-NEXT:    vsl v3, v4, v3
+; CHECK-NOVSX-NEXT:    lvx v4, 0, r3
+; CHECK-NOVSX-NEXT:    vsro v2, v2, v4
+; CHECK-NOVSX-NEXT:    vspltb v4, v4, 15
+; CHECK-NOVSX-NEXT:    vsr v2, v2, v4
+; CHECK-NOVSX-NEXT:    vor v2, v3, v2
+; CHECK-NOVSX-NEXT:    blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 16>
+  %shr = lshr <1 x i128> %num, <i128 112>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
+define <1 x i128> @rotl_112(<1 x i128> %num) {
+; CHECK-VSX-LABEL: rotl_112:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI4_0@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI4_0@toc@l
+; CHECK-VSX-NEXT:    lxvx v3, 0, r3
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI4_1@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI4_1@toc@l
+; CHECK-VSX-NEXT:    vslo v4, v2, v3
+; CHECK-VSX-NEXT:    vspltb v3, v3, 15
+; CHECK-VSX-NEXT:    vsl v3, v4, v3
+; CHECK-VSX-NEXT:    lxvx v4, 0, r3
+; CHECK-VSX-NEXT:    vsro v2, v2, v4
+; CHECK-VSX-NEXT:    vspltb v4, v4, 15
+; CHECK-VSX-NEXT:    vsr v2, v2, v4
+; CHECK-VSX-NEXT:    xxlor v2, v3, v2
+; CHECK-VSX-NEXT:    blr
+;
+; CHECK-NOVSX-LABEL: rotl_112:
+; CHECK-NOVSX:       # %bb.0: # %entry
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI4_0@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI4_0@toc@l
+; CHECK-NOVSX-NEXT:    lvx v3, 0, r3
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI4_1@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI4_1@toc@l
+; CHECK-NOVSX-NEXT:    vslo v4, v2, v3
+; CHECK-NOVSX-NEXT:    vspltb v3, v3, 15
+; CHECK-NOVSX-NEXT:    vsl v3, v4, v3
+; CHECK-NOVSX-NEXT:    lvx v4, 0, r3
+; CHECK-NOVSX-NEXT:    vsro v2, v2, v4
+; CHECK-NOVSX-NEXT:    vspltb v4, v4, 15
+; CHECK-NOVSX-NEXT:    vsr v2, v2, v4
+; CHECK-NOVSX-NEXT:    vor v2, v3, v2
+; CHECK-NOVSX-NEXT:    blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 112>
+  %shr = lshr <1 x i128> %num, <i128 16>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
+define <1 x i128> @rotl_8(<1 x i128> %num) {
+; CHECK-VSX-LABEL: rotl_8:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI5_0@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI5_0@toc@l
+; CHECK-VSX-NEXT:    lxvx v3, 0, r3
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI5_1@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI5_1@toc@l
+; CHECK-VSX-NEXT:    vslo v4, v2, v3
+; CHECK-VSX-NEXT:    vspltb v3, v3, 15
+; CHECK-VSX-NEXT:    vsl v3, v4, v3
+; CHECK-VSX-NEXT:    lxvx v4, 0, r3
+; CHECK-VSX-NEXT:    vsro v2, v2, v4
+; CHECK-VSX-NEXT:    vspltb v4, v4, 15
+; CHECK-VSX-NEXT:    vsr v2, v2, v4
+; CHECK-VSX-NEXT:    xxlor v2, v3, v2
+; CHECK-VSX-NEXT:    blr
+;
+; CHECK-NOVSX-LABEL: rotl_8:
+; CHECK-NOVSX:       # %bb.0: # %entry
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI5_0@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI5_0@toc@l
+; CHECK-NOVSX-NEXT:    lvx v3, 0, r3
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI5_1@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI5_1@toc@l
+; CHECK-NOVSX-NEXT:    vslo v4, v2, v3
+; CHECK-NOVSX-NEXT:    vspltb v3, v3, 15
+; CHECK-NOVSX-NEXT:    vsl v3, v4, v3
+; CHECK-NOVSX-NEXT:    lvx v4, 0, r3
+; CHECK-NOVSX-NEXT:    vsro v2, v2, v4
+; CHECK-NOVSX-NEXT:    vspltb v4, v4, 15
+; CHECK-NOVSX-NEXT:    vsr v2, v2, v4
+; CHECK-NOVSX-NEXT:    vor v2, v3, v2
+; CHECK-NOVSX-NEXT:    blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 8>
+  %shr = lshr <1 x i128> %num, <i128 120>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
+define <1 x i128> @rotl_120(<1 x i128> %num) {
+; CHECK-VSX-LABEL: rotl_120:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI6_0@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI6_0@toc@l
+; CHECK-VSX-NEXT:    lxvx v3, 0, r3
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI6_1@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI6_1@toc@l
+; CHECK-VSX-NEXT:    vslo v4, v2, v3
+; CHECK-VSX-NEXT:    vspltb v3, v3, 15
+; CHECK-VSX-NEXT:    vsl v3, v4, v3
+; CHECK-VSX-NEXT:    lxvx v4, 0, r3
+; CHECK-VSX-NEXT:    vsro v2, v2, v4
+; CHECK-VSX-NEXT:    vspltb v4, v4, 15
+; CHECK-VSX-NEXT:    vsr v2, v2, v4
+; CHECK-VSX-NEXT:    xxlor v2, v3, v2
+; CHECK-VSX-NEXT:    blr
+;
+; CHECK-NOVSX-LABEL: rotl_120:
+; CHECK-NOVSX:       # %bb.0: # %entry
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI6_0@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI6_0@toc@l
+; CHECK-NOVSX-NEXT:    lvx v3, 0, r3
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI6_1@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI6_1@toc@l
+; CHECK-NOVSX-NEXT:    vslo v4, v2, v3
+; CHECK-NOVSX-NEXT:    vspltb v3, v3, 15
+; CHECK-NOVSX-NEXT:    vsl v3, v4, v3
+; CHECK-NOVSX-NEXT:    lvx v4, 0, r3
+; CHECK-NOVSX-NEXT:    vsro v2, v2, v4
+; CHECK-NOVSX-NEXT:    vspltb v4, v4, 15
+; CHECK-NOVSX-NEXT:    vsr v2, v2, v4
+; CHECK-NOVSX-NEXT:    vor v2, v3, v2
+; CHECK-NOVSX-NEXT:    blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 120>
+  %shr = lshr <1 x i128> %num, <i128 8>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
+define <1 x i128> @rotl_28(<1 x i128> %num) {
+; CHECK-VSX-LABEL: rotl_28:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI7_0@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI7_0@toc@l
+; CHECK-VSX-NEXT:    lxvx v3, 0, r3
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI7_1@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI7_1@toc@l
+; CHECK-VSX-NEXT:    vslo v4, v2, v3
+; CHECK-VSX-NEXT:    vspltb v3, v3, 15
+; CHECK-VSX-NEXT:    vsl v3, v4, v3
+; CHECK-VSX-NEXT:    lxvx v4, 0, r3
+; CHECK-VSX-NEXT:    vsro v2, v2, v4
+; CHECK-VSX-NEXT:    vspltb v4, v4, 15
+; CHECK-VSX-NEXT:    vsr v2, v2, v4
+; CHECK-VSX-NEXT:    xxlor v2, v3, v2
+; CHECK-VSX-NEXT:    blr
+;
+; CHECK-NOVSX-LABEL: rotl_28:
+; CHECK-NOVSX:       # %bb.0: # %entry
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI7_0@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI7_0@toc@l
+; CHECK-NOVSX-NEXT:    lvx v3, 0, r3
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI7_1@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI7_1@toc@l
+; CHECK-NOVSX-NEXT:    vslo v4, v2, v3
+; CHECK-NOVSX-NEXT:    vspltb v3, v3, 15
+; CHECK-NOVSX-NEXT:    vsl v3, v4, v3
+; CHECK-NOVSX-NEXT:    lvx v4, 0, r3
+; CHECK-NOVSX-NEXT:    vsro v2, v2, v4
+; CHECK-NOVSX-NEXT:    vspltb v4, v4, 15
+; CHECK-NOVSX-NEXT:    vsr v2, v2, v4
+; CHECK-NOVSX-NEXT:    vor v2, v3, v2
+; CHECK-NOVSX-NEXT:    blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 28>
+  %shr = lshr <1 x i128> %num, <i128 100>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
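+; Negative test: the shift amounts 20 and 100 do not sum to 128, so this
+; is not a rotate and must not be matched as one.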
+define <1 x i128> @NO_rotl(<1 x i128> %num) {
+; CHECK-VSX-LABEL: NO_rotl:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI8_0@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI8_0@toc@l
+; CHECK-VSX-NEXT:    lxvx v3, 0, r3
+; CHECK-VSX-NEXT:    addis r3, r2, .LCPI8_1@toc@ha
+; CHECK-VSX-NEXT:    addi r3, r3, .LCPI8_1@toc@l
+; CHECK-VSX-NEXT:    vslo v4, v2, v3
+; CHECK-VSX-NEXT:    vspltb v3, v3, 15
+; CHECK-VSX-NEXT:    vsl v3, v4, v3
+; CHECK-VSX-NEXT:    lxvx v4, 0, r3
+; CHECK-VSX-NEXT:    vsro v2, v2, v4
+; CHECK-VSX-NEXT:    vspltb v4, v4, 15
+; CHECK-VSX-NEXT:    vsr v2, v2, v4
+; CHECK-VSX-NEXT:    xxlor v2, v3, v2
+; CHECK-VSX-NEXT:    blr
+;
+; CHECK-NOVSX-LABEL: NO_rotl:
+; CHECK-NOVSX:       # %bb.0: # %entry
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI8_0@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI8_0@toc@l
+; CHECK-NOVSX-NEXT:    lvx v3, 0, r3
+; CHECK-NOVSX-NEXT:    addis r3, r2, .LCPI8_1@toc@ha
+; CHECK-NOVSX-NEXT:    addi r3, r3, .LCPI8_1@toc@l
+; CHECK-NOVSX-NEXT:    vslo v4, v2, v3
+; CHECK-NOVSX-NEXT:    vspltb v3, v3, 15
+; CHECK-NOVSX-NEXT:    vsl v3, v4, v3
+; CHECK-NOVSX-NEXT:    lvx v4, 0, r3
+; CHECK-NOVSX-NEXT:    vsro v2, v2, v4
+; CHECK-NOVSX-NEXT:    vspltb v4, v4, 15
+; CHECK-NOVSX-NEXT:    vsr v2, v2, v4
+; CHECK-NOVSX-NEXT:    vor v2, v3, v2
+; CHECK-NOVSX-NEXT:    blr
+entry:
+  %shl = shl <1 x i128> %num, <i128 20>
+  %shr = lshr <1 x i128> %num, <i128 100>
+  %or = or <1 x i128> %shl, %shr
+  ret <1 x i128> %or
+}
+
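+; The same doubleword swap written as a shufflevector already lowers to a
+; single xxswapd (or a vsldoi without VSX), which is what the rotate form
+; above should eventually produce.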
+define <1 x i128> @shufflevector(<1 x i128> %num) {
+; CHECK-VSX-LABEL: shufflevector:
+; CHECK-VSX:       # %bb.0: # %entry
+; CHECK-VSX-NEXT:    xxswapd v2, v2
+; CHECK-VSX-NEXT:    blr
+;
+; CHECK-NOVSX-LABEL: shufflevector:
+; CHECK-NOVSX:       # %bb.0: # %entry
+; CHECK-NOVSX-NEXT:    vsldoi v2, v2, v2, 8
+; CHECK-NOVSX-NEXT:    blr
+entry:
+  %0 = bitcast <1 x i128> %num to <2 x i64>
+  %vecins2 = shufflevector <2 x i64> %0, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
+  %1 = bitcast <2 x i64> %vecins2 to <1 x i128>
+  ret <1 x i128> %1
+}
