[llvm] [PPC] generate stxvw4x/lxvw4x on P7 (PR #87049)

Chen Zheng via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 1 23:44:20 PDT 2024


https://github.com/chenzheng1030 updated https://github.com/llvm/llvm-project/pull/87049

>From 8feaf8a1917848f75a9b7efcabab2b802e14b72c Mon Sep 17 00:00:00 2001
From: Chen Zheng <czhengsz at cn.ibm.com>
Date: Fri, 29 Mar 2024 06:26:24 -0400
Subject: [PATCH 1/2] [PPC] generate stxvw4x/lxvw4x on P7

---
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp   |   9 +-
 .../CodeGen/PowerPC/aix-vec-arg-spills-mir.ll |  61 +++++-----
 .../CodeGen/PowerPC/aix-vec-arg-spills.ll     | 104 +++++++++---------
 llvm/test/CodeGen/PowerPC/memcpy-vec.ll       |  11 +-
 .../test/CodeGen/PowerPC/unal-altivec-wint.ll |   6 +-
 llvm/test/CodeGen/PowerPC/unal-altivec2.ll    |   2 +-
 llvm/test/CodeGen/PowerPC/unal-vec-ldst.ll    |  60 ++--------
 .../test/CodeGen/PowerPC/unal-vec-negarith.ll |   2 +-
 llvm/test/CodeGen/PowerPC/unaligned.ll        |   8 +-
 llvm/test/CodeGen/PowerPC/vsx.ll              |  18 +--
 10 files changed, 106 insertions(+), 175 deletions(-)

diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 7436b202fba0d9..289e0bc29c4a55 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -15965,8 +15965,8 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
     Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
     if (LD->isUnindexed() && VT.isVector() &&
         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
-          // P8 and later hardware should just use LOAD.
-          !Subtarget.hasP8Vector() &&
+          // Hardware with VSX should just use LOAD.
+          !Subtarget.hasVSX() &&
           (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
            VT == MVT::v4f32))) &&
         LD->getAlign() < ABIAlignment) {
@@ -17250,8 +17250,7 @@ bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
 EVT PPCTargetLowering::getOptimalMemOpType(
     const MemOp &Op, const AttributeList &FuncAttributes) const {
   if (getTargetMachine().getOptLevel() != CodeGenOptLevel::None) {
-    // We should use Altivec/VSX loads and stores when available. For unaligned
-    // addresses, unaligned VSX loads are only fast starting with the P8.
+    // We should use Altivec/VSX loads and stores when available.
     if (Subtarget.hasAltivec() && Op.size() >= 16) {
       if (Op.isMemset() && Subtarget.hasVSX()) {
         uint64_t TailSize = Op.size() % 16;
@@ -17263,7 +17262,7 @@ EVT PPCTargetLowering::getOptimalMemOpType(
         }
         return MVT::v4i32;
       }
-      if (Op.isAligned(Align(16)) || Subtarget.hasP8Vector())
+      if (Op.isAligned(Align(16)) || Subtarget.hasVSX())
         return MVT::v4i32;
     }
   }
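
The net effect of these two hunks: on any VSX-capable target the DAG combine
no longer expands unaligned non-extending vector loads into the
lvsl/lvx/vperm sequence, and getOptimalMemOpType now returns MVT::v4i32 for
unaligned memory ops as well. A minimal sketch of the kind of input affected,
modeled on test_l_v4i32 in unal-vec-ldst.ll below (the function name is
illustrative, not from the tests):

; With -mcpu=pwr7 (VSX enabled), this underaligned load should now select
; lxvw4x instead of lvsl/lvx/vperm; see the unal-vec-ldst.ll checks below.
define <4 x i32> @unaligned_v4i32(ptr %p) {
entry:
  %r = load <4 x i32>, ptr %p, align 4
  ret <4 x i32> %r
}
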
diff --git a/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll b/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
index 7c45958a1c2ff9..d927b9edb74d1d 100644
--- a/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=+altivec -vec-extabi \
 ; RUN:     -stop-after=machine-cp -mtriple powerpc-ibm-aix-xcoff < %s | \
 ; RUN:   FileCheck %s --check-prefix=MIR32
@@ -12,21 +12,16 @@
 @__const.caller.t = private unnamed_addr constant %struct.Test { double 0.000000e+00, double 1.000000e+00, double 2.000000e+00, double 3.000000e+00 }, align 8
 
 define double @caller() {
-
   ; MIR32-LABEL: name: caller
   ; MIR32: bb.0.entry:
-  ; MIR32-NEXT:   renamable $r3 = LI 0
-  ; MIR32-NEXT:   renamable $r4 = LIS 16392
-  ; MIR32-NEXT:   STW killed renamable $r4, 180, $r1 :: (store (s32) into unknown-address + 24)
-  ; MIR32-NEXT:   renamable $r4 = LIS 16384
-  ; MIR32-NEXT:   STW renamable $r3, 184, $r1 :: (store (s32) into unknown-address + 28)
-  ; MIR32-NEXT:   STW renamable $r3, 176, $r1 :: (store (s32) into unknown-address + 20)
-  ; MIR32-NEXT:   STW killed renamable $r4, 172, $r1 :: (store (s32) into unknown-address + 16)
-  ; MIR32-NEXT:   STW renamable $r3, 168, $r1 :: (store (s32) into unknown-address + 12)
-  ; MIR32-NEXT:   renamable $r4 = LIS 16368
-  ; MIR32-NEXT:   STW killed renamable $r4, 164, $r1 :: (store (s32) into unknown-address + 8)
-  ; MIR32-NEXT:   STW renamable $r3, 160, $r1 :: (store (s32) into unknown-address + 4)
-  ; MIR32-NEXT:   STW killed renamable $r3, 156, $r1 :: (store (s32))
+  ; MIR32-NEXT:   renamable $r3 = LWZtoc @__const.caller.t, $r2 :: (load (s32) from got)
+  ; MIR32-NEXT:   renamable $r4 = LI 16
+  ; MIR32-NEXT:   renamable $vsl0 = LXVW4X renamable $r3, killed renamable $r4 :: (load (s128) from unknown-address + 16, align 8)
+  ; MIR32-NEXT:   renamable $r4 = LI 172
+  ; MIR32-NEXT:   STXVW4X killed renamable $vsl0, $r1, killed renamable $r4 :: (store (s128) into unknown-address + 16, align 4)
+  ; MIR32-NEXT:   renamable $vsl0 = LXVW4X $zero, killed renamable $r3 :: (load (s128), align 8)
+  ; MIR32-NEXT:   renamable $r3 = LI 156
+  ; MIR32-NEXT:   STXVW4X killed renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 4)
   ; MIR32-NEXT:   ADJCALLSTACKDOWN 188, 0, implicit-def dead $r1, implicit $r1
   ; MIR32-NEXT:   renamable $vsl0 = XXLXORz
   ; MIR32-NEXT:   renamable $r3 = LI 136
@@ -78,32 +73,30 @@ define double @caller() {
   ;
   ; MIR64-LABEL: name: caller
   ; MIR64: bb.0.entry:
-  ; MIR64-NEXT:   renamable $x3 = LI8 2049
-  ; MIR64-NEXT:   renamable $x4 = LI8 1
-  ; MIR64-NEXT:   renamable $x3 = RLDIC killed renamable $x3, 51, 1
-  ; MIR64-NEXT:   STD killed renamable $x3, 216, $x1 :: (store (s64) into unknown-address + 24, align 4)
-  ; MIR64-NEXT:   renamable $x3 = LI8 1023
-  ; MIR64-NEXT:   renamable $x4 = RLDIC killed renamable $x4, 62, 1
-  ; MIR64-NEXT:   STD killed renamable $x4, 208, $x1 :: (store (s64) into unknown-address + 16, align 4)
-  ; MIR64-NEXT:   renamable $x4 = LI8 0
-  ; MIR64-NEXT:   STD renamable $x4, 192, $x1 :: (store (s64), align 4)
-  ; MIR64-NEXT:   renamable $x3 = RLDIC killed renamable $x3, 52, 2
-  ; MIR64-NEXT:   STD killed renamable $x3, 200, $x1 :: (store (s64) into unknown-address + 8, align 4)
+  ; MIR64-NEXT:   renamable $x3 = LDtoc @__const.caller.t, $x2 :: (load (s64) from got)
+  ; MIR64-NEXT:   renamable $x4 = LI8 16
+  ; MIR64-NEXT:   renamable $vsl0 = LXVW4X renamable $x3, killed renamable $x4 :: (load (s128) from unknown-address + 16, align 8)
+  ; MIR64-NEXT:   renamable $x4 = LI8 208
+  ; MIR64-NEXT:   STXVW4X killed renamable $vsl0, $x1, killed renamable $x4 :: (store (s128) into unknown-address + 16, align 4)
+  ; MIR64-NEXT:   renamable $vsl0 = LXVW4X $zero8, killed renamable $x3 :: (load (s128), align 8)
+  ; MIR64-NEXT:   renamable $x3 = LI8 192
+  ; MIR64-NEXT:   STXVW4X killed renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 4)
   ; MIR64-NEXT:   ADJCALLSTACKDOWN 224, 0, implicit-def dead $r1, implicit $r1
   ; MIR64-NEXT:   renamable $vsl0 = XXLXORz
   ; MIR64-NEXT:   renamable $x3 = LI8 160
+  ; MIR64-NEXT:   renamable $x4 = LI8 144
   ; MIR64-NEXT:   STXVW4X renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 8)
-  ; MIR64-NEXT:   renamable $x3 = LI8 144
-  ; MIR64-NEXT:   STXVW4X renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 8)
+  ; MIR64-NEXT:   STXVW4X renamable $vsl0, $x1, killed renamable $x4 :: (store (s128), align 8)
   ; MIR64-NEXT:   renamable $x3 = LI8 128
+  ; MIR64-NEXT:   renamable $x4 = LDtocCPT %const.0, $x2 :: (load (s64) from got)
   ; MIR64-NEXT:   STXVW4X killed renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 8)
-  ; MIR64-NEXT:   renamable $x3 = LDtocCPT %const.0, $x2 :: (load (s64) from got)
-  ; MIR64-NEXT:   renamable $vsl0 = LXVD2X $zero8, killed renamable $x3 :: (load (s128) from constant-pool)
   ; MIR64-NEXT:   renamable $x3 = LI8 80
+  ; MIR64-NEXT:   renamable $vsl0 = LXVD2X $zero8, killed renamable $x4 :: (load (s128) from constant-pool)
+  ; MIR64-NEXT:   renamable $x4 = LI8 512
   ; MIR64-NEXT:   STXVD2X killed renamable $vsl0, $x1, killed renamable $x3 :: (store (s128))
-  ; MIR64-NEXT:   renamable $x3 = LI8 512
-  ; MIR64-NEXT:   STD killed renamable $x3, 184, $x1 :: (store (s64))
-  ; MIR64-NEXT:   STD killed renamable $x4, 176, $x1 :: (store (s64))
+  ; MIR64-NEXT:   renamable $x3 = LI8 0
+  ; MIR64-NEXT:   STD killed renamable $x4, 184, $x1 :: (store (s64))
+  ; MIR64-NEXT:   STD killed renamable $x3, 176, $x1 :: (store (s64))
   ; MIR64-NEXT:   $f1 = XXLXORdpz
   ; MIR64-NEXT:   $f2 = XXLXORdpz
   ; MIR64-NEXT:   $v2 = XXLXORz
@@ -112,8 +105,8 @@ define double @caller() {
   ; MIR64-NEXT:   $v5 = XXLXORz
   ; MIR64-NEXT:   $v6 = XXLXORz
   ; MIR64-NEXT:   $x3 = LI8 128
-  ; MIR64-NEXT:   $x4 = LI8 256
   ; MIR64-NEXT:   $v7 = XXLXORz
+  ; MIR64-NEXT:   $x4 = LI8 256
   ; MIR64-NEXT:   $v8 = XXLXORz
   ; MIR64-NEXT:   $v9 = XXLXORz
   ; MIR64-NEXT:   $v10 = XXLXORz
@@ -136,7 +129,7 @@ define double @caller() {
   ; MIR64-NEXT:   BLR8 implicit $lr8, implicit $rm, implicit $f1
   entry:
     %call = tail call double @callee(i32 signext 128, i32 signext 256, double 0.000000e+00, double 0.000000e+00, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 2.400000e+01, double 2.500000e+01>, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, i32 signext 512, ptr nonnull byval(%struct.Test) align 4 @__const.caller.t)
-      ret double %call
+  ret double %call
 }
 
 declare double @callee(i32 signext, i32 signext, double, double, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, double, double, double, double, double, double, double, double, double, double, double, i32 signext, ptr byval(%struct.Test) align 8)
diff --git a/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills.ll b/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills.ll
index 66f88b4e3d5ab3..91e7d4094fc344 100644
--- a/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills.ll
@@ -15,56 +15,52 @@ define double @caller() {
 ; 32BIT:       # %bb.0: # %entry
 ; 32BIT-NEXT:    mflr 0
 ; 32BIT-NEXT:    stwu 1, -192(1)
-; 32BIT-NEXT:    lis 4, 16392
+; 32BIT-NEXT:    lwz 3, L..C0(2) # @__const.caller.t
+; 32BIT-NEXT:    li 4, 16
 ; 32BIT-NEXT:    stw 0, 200(1)
-; 32BIT-NEXT:    li 3, 0
-; 32BIT-NEXT:    xxlxor 0, 0, 0
 ; 32BIT-NEXT:    xxlxor 1, 1, 1
-; 32BIT-NEXT:    stw 4, 180(1)
-; 32BIT-NEXT:    lis 4, 16384
-; 32BIT-NEXT:    stw 3, 184(1)
-; 32BIT-NEXT:    stw 3, 176(1)
-; 32BIT-NEXT:    stw 4, 172(1)
-; 32BIT-NEXT:    lis 4, 16368
-; 32BIT-NEXT:    stw 3, 168(1)
-; 32BIT-NEXT:    stw 3, 160(1)
-; 32BIT-NEXT:    stw 4, 164(1)
-; 32BIT-NEXT:    stw 3, 156(1)
-; 32BIT-NEXT:    li 3, 136
-; 32BIT-NEXT:    li 4, 120
 ; 32BIT-NEXT:    xxlxor 2, 2, 2
-; 32BIT-NEXT:    stxvw4x 0, 1, 3
-; 32BIT-NEXT:    li 3, 104
-; 32BIT-NEXT:    stxvw4x 0, 1, 4
-; 32BIT-NEXT:    li 4, 88
-; 32BIT-NEXT:    stxvw4x 0, 1, 3
-; 32BIT-NEXT:    stxvw4x 0, 1, 4
-; 32BIT-NEXT:    lwz 4, L..C0(2) # %const.0
-; 32BIT-NEXT:    li 3, 72
-; 32BIT-NEXT:    stxvw4x 0, 1, 3
-; 32BIT-NEXT:    li 3, 48
+; 32BIT-NEXT:    lxvw4x 0, 3, 4
+; 32BIT-NEXT:    li 4, 172
 ; 32BIT-NEXT:    xxlxor 34, 34, 34
 ; 32BIT-NEXT:    xxlxor 35, 35, 35
-; 32BIT-NEXT:    lxvd2x 0, 0, 4
-; 32BIT-NEXT:    li 4, 512
+; 32BIT-NEXT:    stxvw4x 0, 1, 4
+; 32BIT-NEXT:    li 4, 120
 ; 32BIT-NEXT:    xxlxor 36, 36, 36
 ; 32BIT-NEXT:    xxlxor 37, 37, 37
 ; 32BIT-NEXT:    xxlxor 38, 38, 38
+; 32BIT-NEXT:    lxvw4x 0, 0, 3
+; 32BIT-NEXT:    li 3, 156
 ; 32BIT-NEXT:    xxlxor 39, 39, 39
+; 32BIT-NEXT:    stxvw4x 0, 1, 3
+; 32BIT-NEXT:    xxlxor 0, 0, 0
+; 32BIT-NEXT:    li 3, 136
 ; 32BIT-NEXT:    xxlxor 40, 40, 40
 ; 32BIT-NEXT:    xxlxor 41, 41, 41
+; 32BIT-NEXT:    stxvw4x 0, 1, 3
+; 32BIT-NEXT:    li 3, 104
+; 32BIT-NEXT:    stxvw4x 0, 1, 4
+; 32BIT-NEXT:    li 4, 88
 ; 32BIT-NEXT:    xxlxor 42, 42, 42
-; 32BIT-NEXT:    stxvd2x 0, 1, 3
-; 32BIT-NEXT:    stw 4, 152(1)
-; 32BIT-NEXT:    li 3, 128
-; 32BIT-NEXT:    li 4, 256
 ; 32BIT-NEXT:    xxlxor 43, 43, 43
 ; 32BIT-NEXT:    xxlxor 44, 44, 44
+; 32BIT-NEXT:    stxvw4x 0, 1, 3
+; 32BIT-NEXT:    stxvw4x 0, 1, 4
+; 32BIT-NEXT:    lwz 4, L..C1(2) # %const.0
+; 32BIT-NEXT:    li 3, 72
 ; 32BIT-NEXT:    xxlxor 45, 45, 45
+; 32BIT-NEXT:    stxvw4x 0, 1, 3
+; 32BIT-NEXT:    li 3, 48
 ; 32BIT-NEXT:    xxlxor 3, 3, 3
 ; 32BIT-NEXT:    xxlxor 4, 4, 4
+; 32BIT-NEXT:    lxvd2x 0, 0, 4
+; 32BIT-NEXT:    li 4, 512
 ; 32BIT-NEXT:    xxlxor 5, 5, 5
 ; 32BIT-NEXT:    xxlxor 6, 6, 6
+; 32BIT-NEXT:    stxvd2x 0, 1, 3
+; 32BIT-NEXT:    stw 4, 152(1)
+; 32BIT-NEXT:    li 3, 128
+; 32BIT-NEXT:    li 4, 256
 ; 32BIT-NEXT:    xxlxor 7, 7, 7
 ; 32BIT-NEXT:    xxlxor 8, 8, 8
 ; 32BIT-NEXT:    xxlxor 9, 9, 9
@@ -83,54 +79,52 @@ define double @caller() {
 ; 64BIT:       # %bb.0: # %entry
 ; 64BIT-NEXT:    mflr 0
 ; 64BIT-NEXT:    stdu 1, -224(1)
-; 64BIT-NEXT:    li 3, 2049
+; 64BIT-NEXT:    ld 3, L..C0(2) # @__const.caller.t
+; 64BIT-NEXT:    li 4, 16
 ; 64BIT-NEXT:    std 0, 240(1)
-; 64BIT-NEXT:    li 4, 1
-; 64BIT-NEXT:    xxlxor 0, 0, 0
 ; 64BIT-NEXT:    xxlxor 1, 1, 1
-; 64BIT-NEXT:    rldic 3, 3, 51, 1
-; 64BIT-NEXT:    rldic 4, 4, 62, 1
 ; 64BIT-NEXT:    xxlxor 2, 2, 2
+; 64BIT-NEXT:    lxvw4x 0, 3, 4
+; 64BIT-NEXT:    li 4, 208
 ; 64BIT-NEXT:    xxlxor 34, 34, 34
-; 64BIT-NEXT:    std 3, 216(1)
-; 64BIT-NEXT:    li 3, 1023
-; 64BIT-NEXT:    std 4, 208(1)
-; 64BIT-NEXT:    li 4, 0
 ; 64BIT-NEXT:    xxlxor 35, 35, 35
+; 64BIT-NEXT:    stxvw4x 0, 1, 4
+; 64BIT-NEXT:    li 4, 144
 ; 64BIT-NEXT:    xxlxor 36, 36, 36
-; 64BIT-NEXT:    rldic 3, 3, 52, 2
-; 64BIT-NEXT:    std 4, 192(1)
 ; 64BIT-NEXT:    xxlxor 37, 37, 37
 ; 64BIT-NEXT:    xxlxor 38, 38, 38
+; 64BIT-NEXT:    lxvw4x 0, 0, 3
+; 64BIT-NEXT:    li 3, 192
 ; 64BIT-NEXT:    xxlxor 39, 39, 39
-; 64BIT-NEXT:    std 3, 200(1)
+; 64BIT-NEXT:    stxvw4x 0, 1, 3
+; 64BIT-NEXT:    xxlxor 0, 0, 0
 ; 64BIT-NEXT:    li 3, 160
 ; 64BIT-NEXT:    xxlxor 40, 40, 40
-; 64BIT-NEXT:    stxvw4x 0, 1, 3
-; 64BIT-NEXT:    li 3, 144
 ; 64BIT-NEXT:    xxlxor 41, 41, 41
-; 64BIT-NEXT:    xxlxor 42, 42, 42
 ; 64BIT-NEXT:    stxvw4x 0, 1, 3
+; 64BIT-NEXT:    stxvw4x 0, 1, 4
+; 64BIT-NEXT:    ld 4, L..C1(2) # %const.0
 ; 64BIT-NEXT:    li 3, 128
+; 64BIT-NEXT:    xxlxor 42, 42, 42
 ; 64BIT-NEXT:    xxlxor 43, 43, 43
 ; 64BIT-NEXT:    stxvw4x 0, 1, 3
-; 64BIT-NEXT:    ld 3, L..C0(2) # %const.0
+; 64BIT-NEXT:    li 3, 80
 ; 64BIT-NEXT:    xxlxor 44, 44, 44
 ; 64BIT-NEXT:    xxlxor 45, 45, 45
-; 64BIT-NEXT:    lxvd2x 0, 0, 3
-; 64BIT-NEXT:    li 3, 80
+; 64BIT-NEXT:    lxvd2x 0, 0, 4
+; 64BIT-NEXT:    li 4, 512
 ; 64BIT-NEXT:    xxlxor 3, 3, 3
-; 64BIT-NEXT:    xxlxor 4, 4, 4
-; 64BIT-NEXT:    xxlxor 5, 5, 5
 ; 64BIT-NEXT:    stxvd2x 0, 1, 3
-; 64BIT-NEXT:    li 3, 512
-; 64BIT-NEXT:    std 4, 176(1)
+; 64BIT-NEXT:    li 3, 0
+; 64BIT-NEXT:    std 4, 184(1)
 ; 64BIT-NEXT:    li 4, 256
+; 64BIT-NEXT:    xxlxor 4, 4, 4
+; 64BIT-NEXT:    xxlxor 5, 5, 5
 ; 64BIT-NEXT:    xxlxor 6, 6, 6
+; 64BIT-NEXT:    std 3, 176(1)
+; 64BIT-NEXT:    li 3, 128
 ; 64BIT-NEXT:    xxlxor 7, 7, 7
 ; 64BIT-NEXT:    xxlxor 8, 8, 8
-; 64BIT-NEXT:    std 3, 184(1)
-; 64BIT-NEXT:    li 3, 128
 ; 64BIT-NEXT:    xxlxor 9, 9, 9
 ; 64BIT-NEXT:    xxlxor 10, 10, 10
 ; 64BIT-NEXT:    xxlxor 11, 11, 11
diff --git a/llvm/test/CodeGen/PowerPC/memcpy-vec.ll b/llvm/test/CodeGen/PowerPC/memcpy-vec.ll
index d636921eea3e51..34a7af4bc45916 100644
--- a/llvm/test/CodeGen/PowerPC/memcpy-vec.ll
+++ b/llvm/test/CodeGen/PowerPC/memcpy-vec.ll
@@ -10,12 +10,8 @@ entry:
   ret void
 
 ; PWR7-LABEL: @foo1
-; PWR7-NOT: bl memcpy
-; PWR7-DAG: li [[OFFSET:[0-9]+]], 16
-; PWR7-DAG: lxvd2x [[TMP0:[0-9]+]], 4, [[OFFSET]]
-; PWR7-DAG: stxvd2x [[TMP0]], 3, [[OFFSET]]
-; PWR7-DAG: lxvd2x [[TMP1:[0-9]+]], 0, 4
-; PWR7-DAG: stxvd2x [[TMP1]], 0, 3
+; PWR7: lxvw4x
+; PWR7: stxvw4x
 ; PWR7: blr
 
 ; PWR8-LABEL: @foo1
@@ -34,7 +30,8 @@ entry:
   ret void
 
 ; PWR7-LABEL: @foo2
-; PWR7: bl memcpy
+; PWR7: lxvw4x
+; PWR7: stxvw4x
 ; PWR7: blr
 
 ; PWR8-LABEL: @foo2
diff --git a/llvm/test/CodeGen/PowerPC/unal-altivec-wint.ll b/llvm/test/CodeGen/PowerPC/unal-altivec-wint.ll
index d6244cd828e5a6..a590f54b5a6765 100644
--- a/llvm/test/CodeGen/PowerPC/unal-altivec-wint.ll
+++ b/llvm/test/CodeGen/PowerPC/unal-altivec-wint.ll
@@ -17,7 +17,7 @@ entry:
 ; CHECK-LABEL: @test1
 ; CHECK: li [[REG:[0-9]+]], 16
 ; CHECK-NOT: li {{[0-9]+}}, 15
-; CHECK-DAG: lvx {{[0-9]+}}, 0, 3
+; CHECK-DAG: lxvw4x {{[0-9]+}}, 0, 3
 ; CHECK-DAG: lvx {{[0-9]+}}, 3, [[REG]]
 ; CHECK: blr
 }
@@ -36,8 +36,8 @@ entry:
 ; CHECK-LABEL: @test2
 ; CHECK: li [[REG:[0-9]+]], 16
 ; CHECK-NOT: li {{[0-9]+}}, 15
-; CHECK-DAG: lvx {{[0-9]+}}, 0, 3
-; CHECK-DAG: lvx {{[0-9]+}}, 3, [[REG]]
+; CHECK-DAG: stvx 2, 3, [[REG]]
+; CHECK-DAG: lxvw4x {{[0-9]+}}, 0, 3
 ; CHECK: blr
 }
 
diff --git a/llvm/test/CodeGen/PowerPC/unal-altivec2.ll b/llvm/test/CodeGen/PowerPC/unal-altivec2.ll
index fafcab8468eb4d..39a82fe0a0977c 100644
--- a/llvm/test/CodeGen/PowerPC/unal-altivec2.ll
+++ b/llvm/test/CodeGen/PowerPC/unal-altivec2.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr6 < %s | FileCheck %s
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
diff --git a/llvm/test/CodeGen/PowerPC/unal-vec-ldst.ll b/llvm/test/CodeGen/PowerPC/unal-vec-ldst.ll
index b0ed395fc3a190..c2e20149cb9fee 100644
--- a/llvm/test/CodeGen/PowerPC/unal-vec-ldst.ll
+++ b/llvm/test/CodeGen/PowerPC/unal-vec-ldst.ll
@@ -5,11 +5,7 @@
 define <16 x i8> @test_l_v16i8(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    li 4, 15
-; CHECK-NEXT:    lvsl 3, 0, 3
-; CHECK-NEXT:    lvx 2, 3, 4
-; CHECK-NEXT:    lvx 4, 0, 3
-; CHECK-NEXT:    vperm 2, 4, 2, 3
+; CHECK-NEXT:    lxvw4x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
   %r = load <16 x i8>, ptr %p, align 1
@@ -20,14 +16,9 @@ entry:
 define <32 x i8> @test_l_v32i8(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    li 4, 31
-; CHECK-NEXT:    lvsl 4, 0, 3
-; CHECK-NEXT:    lvx 2, 3, 4
 ; CHECK-NEXT:    li 4, 16
-; CHECK-NEXT:    lvx 5, 3, 4
-; CHECK-NEXT:    vperm 3, 5, 2, 4
-; CHECK-NEXT:    lvx 2, 0, 3
-; CHECK-NEXT:    vperm 2, 2, 5, 4
+; CHECK-NEXT:    lxvw4x 34, 0, 3
+; CHECK-NEXT:    lxvw4x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
   %r = load <32 x i8>, ptr %p, align 1
@@ -38,11 +29,7 @@ entry:
 define <8 x i16> @test_l_v8i16(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    li 4, 15
-; CHECK-NEXT:    lvsl 3, 0, 3
-; CHECK-NEXT:    lvx 2, 3, 4
-; CHECK-NEXT:    lvx 4, 0, 3
-; CHECK-NEXT:    vperm 2, 4, 2, 3
+; CHECK-NEXT:    lxvw4x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
   %r = load <8 x i16>, ptr %p, align 2
@@ -53,14 +40,9 @@ entry:
 define <16 x i16> @test_l_v16i16(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    li 4, 31
-; CHECK-NEXT:    lvsl 4, 0, 3
-; CHECK-NEXT:    lvx 2, 3, 4
 ; CHECK-NEXT:    li 4, 16
-; CHECK-NEXT:    lvx 5, 3, 4
-; CHECK-NEXT:    vperm 3, 5, 2, 4
-; CHECK-NEXT:    lvx 2, 0, 3
-; CHECK-NEXT:    vperm 2, 2, 5, 4
+; CHECK-NEXT:    lxvw4x 34, 0, 3
+; CHECK-NEXT:    lxvw4x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
   %r = load <16 x i16>, ptr %p, align 2
@@ -71,11 +53,7 @@ entry:
 define <4 x i32> @test_l_v4i32(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    li 4, 15
-; CHECK-NEXT:    lvsl 3, 0, 3
-; CHECK-NEXT:    lvx 2, 3, 4
-; CHECK-NEXT:    lvx 4, 0, 3
-; CHECK-NEXT:    vperm 2, 4, 2, 3
+; CHECK-NEXT:    lxvw4x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
   %r = load <4 x i32>, ptr %p, align 4
@@ -86,14 +64,9 @@ entry:
 define <8 x i32> @test_l_v8i32(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    li 4, 31
-; CHECK-NEXT:    lvsl 4, 0, 3
-; CHECK-NEXT:    lvx 2, 3, 4
 ; CHECK-NEXT:    li 4, 16
-; CHECK-NEXT:    lvx 5, 3, 4
-; CHECK-NEXT:    vperm 3, 5, 2, 4
-; CHECK-NEXT:    lvx 2, 0, 3
-; CHECK-NEXT:    vperm 2, 2, 5, 4
+; CHECK-NEXT:    lxvw4x 34, 0, 3
+; CHECK-NEXT:    lxvw4x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
   %r = load <8 x i32>, ptr %p, align 4
@@ -128,11 +101,7 @@ entry:
 define <4 x float> @test_l_v4float(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v4float:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    li 4, 15
-; CHECK-NEXT:    lvsl 3, 0, 3
-; CHECK-NEXT:    lvx 2, 3, 4
-; CHECK-NEXT:    lvx 4, 0, 3
-; CHECK-NEXT:    vperm 2, 4, 2, 3
+; CHECK-NEXT:    lxvw4x 34, 0, 3
 ; CHECK-NEXT:    blr
 entry:
   %r = load <4 x float>, ptr %p, align 4
@@ -143,14 +112,9 @@ entry:
 define <8 x float> @test_l_v8float(ptr %p) #0 {
 ; CHECK-LABEL: test_l_v8float:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    li 4, 31
-; CHECK-NEXT:    lvsl 4, 0, 3
-; CHECK-NEXT:    lvx 2, 3, 4
 ; CHECK-NEXT:    li 4, 16
-; CHECK-NEXT:    lvx 5, 3, 4
-; CHECK-NEXT:    vperm 3, 5, 2, 4
-; CHECK-NEXT:    lvx 2, 0, 3
-; CHECK-NEXT:    vperm 2, 2, 5, 4
+; CHECK-NEXT:    lxvw4x 34, 0, 3
+; CHECK-NEXT:    lxvw4x 35, 3, 4
 ; CHECK-NEXT:    blr
 entry:
   %r = load <8 x float>, ptr %p, align 4
diff --git a/llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll b/llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll
index 6f8b9c397124c4..2d3c5180664c2f 100644
--- a/llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll
+++ b/llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll
@@ -13,5 +13,5 @@ entry:
 ; CHECK:     v4i32,ch = llvm.ppc.altivec.lvx{{.*}}<(load (s248) from %ir.p - 15, align 1)>
 }
 
-attributes #0 = { nounwind "target-cpu"="pwr7" }
+attributes #0 = { nounwind "target-cpu"="pwr6" }
 
diff --git a/llvm/test/CodeGen/PowerPC/unaligned.ll b/llvm/test/CodeGen/PowerPC/unaligned.ll
index 481ed7230a2a5b..4d3c1487a69b98 100644
--- a/llvm/test/CodeGen/PowerPC/unaligned.ll
+++ b/llvm/test/CodeGen/PowerPC/unaligned.ll
@@ -123,12 +123,8 @@ define void @foo6(ptr %p, ptr %r) nounwind {
 ;
 ; CHECK-VSX-LABEL: foo6:
 ; CHECK-VSX:       # %bb.0: # %entry
-; CHECK-VSX-NEXT:    li 5, 15
-; CHECK-VSX-NEXT:    lvsl 3, 0, 3
-; CHECK-VSX-NEXT:    lvx 2, 3, 5
-; CHECK-VSX-NEXT:    lvx 4, 0, 3
-; CHECK-VSX-NEXT:    vperm 2, 4, 2, 3
-; CHECK-VSX-NEXT:    stxvw4x 34, 0, 4
+; CHECK-VSX-NEXT:    lxvw4x 0, 0, 3
+; CHECK-VSX-NEXT:    stxvw4x 0, 0, 4
 ; CHECK-VSX-NEXT:    blr
 ; For VSX on P7, unaligned loads and stores are preferable to aligned
 ; stack slots, but lvsl/vperm is better still.  (On P8 lxvw4x is preferable.)
diff --git a/llvm/test/CodeGen/PowerPC/vsx.ll b/llvm/test/CodeGen/PowerPC/vsx.ll
index 32cbfd6d810acc..88ff02d42108fc 100644
--- a/llvm/test/CodeGen/PowerPC/vsx.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx.ll
@@ -1079,29 +1079,17 @@ define void @test33(ptr %a, <4 x float> %b) {
 define <4 x float> @test32u(ptr %a) {
 ; CHECK-LABEL: test32u:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li r4, 15
-; CHECK-NEXT:    lvsl v3, 0, r3
-; CHECK-NEXT:    lvx v2, r3, r4
-; CHECK-NEXT:    lvx v4, 0, r3
-; CHECK-NEXT:    vperm v2, v4, v2, v3
+; CHECK-NEXT:    lxvw4x v2, 0, r3
 ; CHECK-NEXT:    blr
 ;
 ; CHECK-REG-LABEL: test32u:
 ; CHECK-REG:       # %bb.0:
-; CHECK-REG-NEXT:    li r4, 15
-; CHECK-REG-NEXT:    lvsl v3, 0, r3
-; CHECK-REG-NEXT:    lvx v2, r3, r4
-; CHECK-REG-NEXT:    lvx v4, 0, r3
-; CHECK-REG-NEXT:    vperm v2, v4, v2, v3
+; CHECK-REG-NEXT:    lxvw4x v2, 0, r3
 ; CHECK-REG-NEXT:    blr
 ;
 ; CHECK-FISL-LABEL: test32u:
 ; CHECK-FISL:       # %bb.0:
-; CHECK-FISL-NEXT:    li r4, 15
-; CHECK-FISL-NEXT:    lvx v3, r3, r4
-; CHECK-FISL-NEXT:    lvsl v4, 0, r3
-; CHECK-FISL-NEXT:    lvx v2, 0, r3
-; CHECK-FISL-NEXT:    vperm v2, v2, v3, v4
+; CHECK-FISL-NEXT:    lxvw4x v2, 0, r3
 ; CHECK-FISL-NEXT:    blr
 ;
 ; CHECK-LE-LABEL: test32u:

>From a775f498a5b28be72a7b50eaaf262951d1ff6cd3 Mon Sep 17 00:00:00 2001
From: Chen Zheng <czhengsz at cn.ibm.com>
Date: Tue, 2 Apr 2024 02:43:25 -0400
Subject: [PATCH 2/2] only generate stxvw4x/lxvw4x for unaligned addresses on P7
 AIX

Tested on AIX servers; it works well in both functionality and performance.
---
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp   | 14 +++--
 .../CodeGen/PowerPC/aix-vec-arg-spills-mir.ll | 62 +++++++++++--------
 llvm/test/CodeGen/PowerPC/memcpy-vec.ll       | 32 ++++++++--
 .../test/CodeGen/PowerPC/unal-altivec-wint.ll |  6 +-
 llvm/test/CodeGen/PowerPC/unal-altivec2.ll    |  2 +-
 llvm/test/CodeGen/PowerPC/unal-vec-ldst.ll    | 40 ------------
 .../test/CodeGen/PowerPC/unal-vec-negarith.ll |  2 +-
 llvm/test/CodeGen/PowerPC/unaligned.ll        |  8 ++-
 llvm/test/CodeGen/PowerPC/vsx.ll              | 18 +++++-
 9 files changed, 99 insertions(+), 85 deletions(-)

diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 289e0bc29c4a55..95a863087d30b0 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -15965,8 +15965,10 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
     Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
     if (LD->isUnindexed() && VT.isVector() &&
         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
-          // Hardware with VSX should just use LOAD.
-          !Subtarget.hasVSX() &&
+          // P8 and later hardware should just use LOAD.
+          // On AIX, hardware with VSX should also just use LOAD.
+          !(Subtarget.hasP8Vector() ||
+            (Subtarget.hasVSX() && Subtarget.isAIXABI())) &&
           (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
            VT == MVT::v4f32))) &&
         LD->getAlign() < ABIAlignment) {
@@ -17250,7 +17252,9 @@ bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
 EVT PPCTargetLowering::getOptimalMemOpType(
     const MemOp &Op, const AttributeList &FuncAttributes) const {
   if (getTargetMachine().getOptLevel() != CodeGenOptLevel::None) {
-    // We should use Altivec/VSX loads and stores when available.
+    // We should use Altivec/VSX loads and stores when available. For unaligned
+    // addresses, unaligned VSX loads are fast on P8 and later, and on P7 on AIX.
+    // FIXME: check the status of P7 on Linux.
     if (Subtarget.hasAltivec() && Op.size() >= 16) {
       if (Op.isMemset() && Subtarget.hasVSX()) {
         uint64_t TailSize = Op.size() % 16;
@@ -17262,7 +17266,9 @@ EVT PPCTargetLowering::getOptimalMemOpType(
         }
         return MVT::v4i32;
       }
-      if (Op.isAligned(Align(16)) || Subtarget.hasVSX())
+      if (Op.isAligned(Align(16)) ||
+          ((Subtarget.hasVSX() && Subtarget.isAIXABI()) ||
+           Subtarget.hasP8Vector()))
         return MVT::v4i32;
     }
   }
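
With this refinement, P8 and later keep the direct unaligned vector LOAD
lowering everywhere, P7 with VSX gets it only on AIX, and Linux P7 reverts to
the earlier lvsl/lvx/vperm expansion until the FIXME above is resolved. A
minimal sketch of the divergence, with invocations modeled on the RUN lines
in the tests below (the function itself is illustrative, not from the tests):

;   llc -mcpu=pwr7 -mtriple=powerpc64-unknown-linux-gnu        -> lvsl/lvx/vperm
;   llc -mcpu=pwr7 -mtriple=powerpc64-ibm-aix-xcoff -vec-extabi -> lxvw4x
define <4 x float> @unaligned_v4f32(ptr %p) {
entry:
  %v = load <4 x float>, ptr %p, align 1
  ret <4 x float> %v
}
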
diff --git a/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll b/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
index d927b9edb74d1d..d77699d54acc24 100644
--- a/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 ; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=+altivec -vec-extabi \
 ; RUN:     -stop-after=machine-cp -mtriple powerpc-ibm-aix-xcoff < %s | \
 ; RUN:   FileCheck %s --check-prefix=MIR32
@@ -12,16 +12,21 @@
 @__const.caller.t = private unnamed_addr constant %struct.Test { double 0.000000e+00, double 1.000000e+00, double 2.000000e+00, double 3.000000e+00 }, align 8
 
 define double @caller() {
+
   ; MIR32-LABEL: name: caller
   ; MIR32: bb.0.entry:
-  ; MIR32-NEXT:   renamable $r3 = LWZtoc @__const.caller.t, $r2 :: (load (s32) from got)
-  ; MIR32-NEXT:   renamable $r4 = LI 16
-  ; MIR32-NEXT:   renamable $vsl0 = LXVW4X renamable $r3, killed renamable $r4 :: (load (s128) from unknown-address + 16, align 8)
-  ; MIR32-NEXT:   renamable $r4 = LI 172
-  ; MIR32-NEXT:   STXVW4X killed renamable $vsl0, $r1, killed renamable $r4 :: (store (s128) into unknown-address + 16, align 4)
-  ; MIR32-NEXT:   renamable $vsl0 = LXVW4X $zero, killed renamable $r3 :: (load (s128), align 8)
-  ; MIR32-NEXT:   renamable $r3 = LI 156
-  ; MIR32-NEXT:   STXVW4X killed renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 4)
+  ; MIR32-NEXT:   renamable $r3 = LI 0
+  ; MIR32-NEXT:   renamable $r4 = LIS 16392
+  ; MIR32-NEXT:   STW killed renamable $r4, 180, $r1 :: (store (s32) into unknown-address + 24)
+  ; MIR32-NEXT:   renamable $r4 = LIS 16384
+  ; MIR32-NEXT:   STW renamable $r3, 184, $r1 :: (store (s32) into unknown-address + 28)
+  ; MIR32-NEXT:   STW renamable $r3, 176, $r1 :: (store (s32) into unknown-address + 20)
+  ; MIR32-NEXT:   STW killed renamable $r4, 172, $r1 :: (store (s32) into unknown-address + 16)
+  ; MIR32-NEXT:   STW renamable $r3, 168, $r1 :: (store (s32) into unknown-address + 12)
+  ; MIR32-NEXT:   renamable $r4 = LIS 16368
+  ; MIR32-NEXT:   STW killed renamable $r4, 164, $r1 :: (store (s32) into unknown-address + 8)
+  ; MIR32-NEXT:   STW renamable $r3, 160, $r1 :: (store (s32) into unknown-address + 4)
+  ; MIR32-NEXT:   STW killed renamable $r3, 156, $r1 :: (store (s32))
   ; MIR32-NEXT:   ADJCALLSTACKDOWN 188, 0, implicit-def dead $r1, implicit $r1
   ; MIR32-NEXT:   renamable $vsl0 = XXLXORz
   ; MIR32-NEXT:   renamable $r3 = LI 136
@@ -73,30 +78,32 @@ define double @caller() {
   ;
   ; MIR64-LABEL: name: caller
   ; MIR64: bb.0.entry:
-  ; MIR64-NEXT:   renamable $x3 = LDtoc @__const.caller.t, $x2 :: (load (s64) from got)
-  ; MIR64-NEXT:   renamable $x4 = LI8 16
-  ; MIR64-NEXT:   renamable $vsl0 = LXVW4X renamable $x3, killed renamable $x4 :: (load (s128) from unknown-address + 16, align 8)
-  ; MIR64-NEXT:   renamable $x4 = LI8 208
-  ; MIR64-NEXT:   STXVW4X killed renamable $vsl0, $x1, killed renamable $x4 :: (store (s128) into unknown-address + 16, align 4)
-  ; MIR64-NEXT:   renamable $vsl0 = LXVW4X $zero8, killed renamable $x3 :: (load (s128), align 8)
-  ; MIR64-NEXT:   renamable $x3 = LI8 192
-  ; MIR64-NEXT:   STXVW4X killed renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 4)
+  ; MIR64-NEXT:   renamable $x3 = LI8 2049
+  ; MIR64-NEXT:   renamable $x4 = LI8 1
+  ; MIR64-NEXT:   renamable $x3 = RLDIC killed renamable $x3, 51, 1
+  ; MIR64-NEXT:   STD killed renamable $x3, 216, $x1 :: (store (s64) into unknown-address + 24, align 4)
+  ; MIR64-NEXT:   renamable $x3 = LI8 1023
+  ; MIR64-NEXT:   renamable $x4 = RLDIC killed renamable $x4, 62, 1
+  ; MIR64-NEXT:   STD killed renamable $x4, 208, $x1 :: (store (s64) into unknown-address + 16, align 4)
+  ; MIR64-NEXT:   renamable $x4 = LI8 0
+  ; MIR64-NEXT:   STD renamable $x4, 192, $x1 :: (store (s64), align 4)
+  ; MIR64-NEXT:   renamable $x3 = RLDIC killed renamable $x3, 52, 2
+  ; MIR64-NEXT:   STD killed renamable $x3, 200, $x1 :: (store (s64) into unknown-address + 8, align 4)
   ; MIR64-NEXT:   ADJCALLSTACKDOWN 224, 0, implicit-def dead $r1, implicit $r1
   ; MIR64-NEXT:   renamable $vsl0 = XXLXORz
   ; MIR64-NEXT:   renamable $x3 = LI8 160
-  ; MIR64-NEXT:   renamable $x4 = LI8 144
   ; MIR64-NEXT:   STXVW4X renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 8)
-  ; MIR64-NEXT:   STXVW4X renamable $vsl0, $x1, killed renamable $x4 :: (store (s128), align 8)
+  ; MIR64-NEXT:   renamable $x3 = LI8 144
+  ; MIR64-NEXT:   STXVW4X renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 8)
   ; MIR64-NEXT:   renamable $x3 = LI8 128
-  ; MIR64-NEXT:   renamable $x4 = LDtocCPT %const.0, $x2 :: (load (s64) from got)
   ; MIR64-NEXT:   STXVW4X killed renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 8)
+  ; MIR64-NEXT:   renamable $x3 = LDtocCPT %const.0, $x2 :: (load (s64) from got)
+  ; MIR64-NEXT:   renamable $vsl0 = LXVD2X $zero8, killed renamable $x3 :: (load (s128) from constant-pool)
   ; MIR64-NEXT:   renamable $x3 = LI8 80
-  ; MIR64-NEXT:   renamable $vsl0 = LXVD2X $zero8, killed renamable $x4 :: (load (s128) from constant-pool)
-  ; MIR64-NEXT:   renamable $x4 = LI8 512
   ; MIR64-NEXT:   STXVD2X killed renamable $vsl0, $x1, killed renamable $x3 :: (store (s128))
-  ; MIR64-NEXT:   renamable $x3 = LI8 0
-  ; MIR64-NEXT:   STD killed renamable $x4, 184, $x1 :: (store (s64))
-  ; MIR64-NEXT:   STD killed renamable $x3, 176, $x1 :: (store (s64))
+  ; MIR64-NEXT:   renamable $x3 = LI8 512
+  ; MIR64-NEXT:   STD killed renamable $x3, 184, $x1 :: (store (s64))
+  ; MIR64-NEXT:   STD killed renamable $x4, 176, $x1 :: (store (s64))
   ; MIR64-NEXT:   $f1 = XXLXORdpz
   ; MIR64-NEXT:   $f2 = XXLXORdpz
   ; MIR64-NEXT:   $v2 = XXLXORz
@@ -105,8 +112,8 @@ define double @caller() {
   ; MIR64-NEXT:   $v5 = XXLXORz
   ; MIR64-NEXT:   $v6 = XXLXORz
   ; MIR64-NEXT:   $x3 = LI8 128
-  ; MIR64-NEXT:   $v7 = XXLXORz
   ; MIR64-NEXT:   $x4 = LI8 256
+  ; MIR64-NEXT:   $v7 = XXLXORz
   ; MIR64-NEXT:   $v8 = XXLXORz
   ; MIR64-NEXT:   $v9 = XXLXORz
   ; MIR64-NEXT:   $v10 = XXLXORz
@@ -129,7 +136,8 @@ define double @caller() {
   ; MIR64-NEXT:   BLR8 implicit $lr8, implicit $rm, implicit $f1
   entry:
     %call = tail call double @callee(i32 signext 128, i32 signext 256, double 0.000000e+00, double 0.000000e+00, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 2.400000e+01, double 2.500000e+01>, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, i32 signext 512, ptr nonnull byval(%struct.Test) align 4 @__const.caller.t)
-  ret double %call
+      ret double %call
 }
 
 declare double @callee(i32 signext, i32 signext, double, double, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, <2 x double>, double, double, double, double, double, double, double, double, double, double, double, i32 signext, ptr byval(%struct.Test) align 8)
+
diff --git a/llvm/test/CodeGen/PowerPC/memcpy-vec.ll b/llvm/test/CodeGen/PowerPC/memcpy-vec.ll
index 34a7af4bc45916..003e20a49b3fa8 100644
--- a/llvm/test/CodeGen/PowerPC/memcpy-vec.ll
+++ b/llvm/test/CodeGen/PowerPC/memcpy-vec.ll
@@ -1,4 +1,5 @@
 ; RUN: llc -verify-machineinstrs -mcpu=pwr7 < %s | FileCheck  %s -check-prefix=PWR7
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mtriple=powerpc64-ibm-aix-xcoff < %s | FileCheck  %s -check-prefixes=PWR7-AIX
 ; RUN: llc -verify-machineinstrs -mcpu=pwr8 < %s | FileCheck  %s -check-prefix=PWR8
 target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
@@ -10,10 +11,19 @@ entry:
   ret void
 
 ; PWR7-LABEL: @foo1
-; PWR7: lxvw4x
-; PWR7: stxvw4x
+; PWR7-NOT: bl memcpy
+; PWR7-DAG: li [[OFFSET:[0-9]+]], 16
+; PWR7-DAG: lxvd2x [[TMP0:[0-9]+]], 4, [[OFFSET]]
+; PWR7-DAG: stxvd2x [[TMP0]], 3, [[OFFSET]]
+; PWR7-DAG: lxvd2x [[TMP1:[0-9]+]], 0, 4
+; PWR7-DAG: stxvd2x [[TMP1]], 0, 3
 ; PWR7: blr
 
+; PWR7-AIX-LABEL: @foo1
+; PWR7-AIX: lxvw4x
+; PWR7-AIX: stxvw4x
+; PWR7-AIX: blr
+
 ; PWR8-LABEL: @foo1
 ; PWR8: lxvw4x
 ; PWR8: stxvw4x
@@ -30,10 +40,14 @@ entry:
   ret void
 
 ; PWR7-LABEL: @foo2
-; PWR7: lxvw4x
-; PWR7: stxvw4x
+; PWR7: bl memcpy
 ; PWR7: blr
 
+; PWR7-AIX-LABEL: @foo2
+; PWR7-AIX: lxvw4x
+; PWR7-AIX: stxvw4x
+; PWR7-AIX: blr
+
 ; PWR8-LABEL: @foo2
 ; PWR8: lxvw4x
 ; PWR8: stxvw4x
@@ -51,6 +65,11 @@ entry:
 ; PWR7: stxvw4x
 ; PWR7: blr
 
+; PWR7-AIX-LABEL: @bar1
+; PWR7-AIX-NOT: bl memset
+; PWR7-AIX: stxvw4x
+; PWR7-AIX: blr
+
 ; PWR8-LABEL: @bar1
 ; PWR8-NOT: bl memset
 ; PWR8: stxvw4x
@@ -68,6 +87,11 @@ entry:
 ; PWR7: stxvw4x
 ; PWR7: blr
 
+; PWR7-AIX-LABEL: @bar2
+; PWR7-AIX-NOT: bl memset
+; PWR7-AIX: stxvw4x
+; PWR7-AIX: blr
+
 ; PWR8-LABEL: @bar2
 ; PWR8-NOT: bl memset
 ; PWR8: stxvw4x
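
Taken together, the memcpy-vec.ll expectations on P7 now split by OS: foo2
falls back to "bl memcpy" on Linux but lowers to lxvw4x/stxvw4x pairs on AIX,
matching the getOptimalMemOpType change. A hypothetical input of the shape
these checks exercise (illustrative name and size; the tests' actual bodies
are not shown in this diff):

define void @copy32(ptr %d, ptr %s) {
entry:
  ; A 32-byte copy with no alignment guarantee, in the spirit of foo2.
  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %d, ptr align 1 %s, i64 32, i1 false)
  ret void
}
declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)
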
diff --git a/llvm/test/CodeGen/PowerPC/unal-altivec-wint.ll b/llvm/test/CodeGen/PowerPC/unal-altivec-wint.ll
index a590f54b5a6765..d6244cd828e5a6 100644
--- a/llvm/test/CodeGen/PowerPC/unal-altivec-wint.ll
+++ b/llvm/test/CodeGen/PowerPC/unal-altivec-wint.ll
@@ -17,7 +17,7 @@ entry:
 ; CHECK-LABEL: @test1
 ; CHECK: li [[REG:[0-9]+]], 16
 ; CHECK-NOT: li {{[0-9]+}}, 15
-; CHECK-DAG: lxvw4x {{[0-9]+}}, 0, 3
+; CHECK-DAG: lvx {{[0-9]+}}, 0, 3
 ; CHECK-DAG: lvx {{[0-9]+}}, 3, [[REG]]
 ; CHECK: blr
 }
@@ -36,8 +36,8 @@ entry:
 ; CHECK-LABEL: @test2
 ; CHECK: li [[REG:[0-9]+]], 16
 ; CHECK-NOT: li {{[0-9]+}}, 15
-; CHECK-DAG: stvx 2, 3, [[REG]]
-; CHECK-DAG: lxvw4x {{[0-9]+}}, 0, 3
+; CHECK-DAG: lvx {{[0-9]+}}, 0, 3
+; CHECK-DAG: lvx {{[0-9]+}}, 3, [[REG]]
 ; CHECK: blr
 }
 
diff --git a/llvm/test/CodeGen/PowerPC/unal-altivec2.ll b/llvm/test/CodeGen/PowerPC/unal-altivec2.ll
index 39a82fe0a0977c..fafcab8468eb4d 100644
--- a/llvm/test/CodeGen/PowerPC/unal-altivec2.ll
+++ b/llvm/test/CodeGen/PowerPC/unal-altivec2.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr6 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 < %s | FileCheck %s
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
diff --git a/llvm/test/CodeGen/PowerPC/unal-vec-ldst.ll b/llvm/test/CodeGen/PowerPC/unal-vec-ldst.ll
index c2e20149cb9fee..c8563b07f7f34d 100644
--- a/llvm/test/CodeGen/PowerPC/unal-vec-ldst.ll
+++ b/llvm/test/CodeGen/PowerPC/unal-vec-ldst.ll
@@ -3,10 +3,6 @@
 ; RUN: llc -mtriple powerpc64-ibm-aix-xcoff -verify-machineinstrs -vec-extabi < %s | FileCheck %s
 
 define <16 x i8> @test_l_v16i8(ptr %p) #0 {
-; CHECK-LABEL: test_l_v16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    lxvw4x 34, 0, 3
-; CHECK-NEXT:    blr
 entry:
   %r = load <16 x i8>, ptr %p, align 1
   ret <16 x i8> %r
@@ -14,12 +10,6 @@ entry:
 }
 
 define <32 x i8> @test_l_v32i8(ptr %p) #0 {
-; CHECK-LABEL: test_l_v32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    li 4, 16
-; CHECK-NEXT:    lxvw4x 34, 0, 3
-; CHECK-NEXT:    lxvw4x 35, 3, 4
-; CHECK-NEXT:    blr
 entry:
   %r = load <32 x i8>, ptr %p, align 1
   ret <32 x i8> %r
@@ -27,10 +17,6 @@ entry:
 }
 
 define <8 x i16> @test_l_v8i16(ptr %p) #0 {
-; CHECK-LABEL: test_l_v8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    lxvw4x 34, 0, 3
-; CHECK-NEXT:    blr
 entry:
   %r = load <8 x i16>, ptr %p, align 2
   ret <8 x i16> %r
@@ -38,12 +24,6 @@ entry:
 }
 
 define <16 x i16> @test_l_v16i16(ptr %p) #0 {
-; CHECK-LABEL: test_l_v16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    li 4, 16
-; CHECK-NEXT:    lxvw4x 34, 0, 3
-; CHECK-NEXT:    lxvw4x 35, 3, 4
-; CHECK-NEXT:    blr
 entry:
   %r = load <16 x i16>, ptr %p, align 2
   ret <16 x i16> %r
@@ -51,10 +31,6 @@ entry:
 }
 
 define <4 x i32> @test_l_v4i32(ptr %p) #0 {
-; CHECK-LABEL: test_l_v4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    lxvw4x 34, 0, 3
-; CHECK-NEXT:    blr
 entry:
   %r = load <4 x i32>, ptr %p, align 4
   ret <4 x i32> %r
@@ -62,12 +38,6 @@ entry:
 }
 
 define <8 x i32> @test_l_v8i32(ptr %p) #0 {
-; CHECK-LABEL: test_l_v8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    li 4, 16
-; CHECK-NEXT:    lxvw4x 34, 0, 3
-; CHECK-NEXT:    lxvw4x 35, 3, 4
-; CHECK-NEXT:    blr
 entry:
   %r = load <8 x i32>, ptr %p, align 4
   ret <8 x i32> %r
@@ -99,10 +69,6 @@ entry:
 }
 
 define <4 x float> @test_l_v4float(ptr %p) #0 {
-; CHECK-LABEL: test_l_v4float:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    lxvw4x 34, 0, 3
-; CHECK-NEXT:    blr
 entry:
   %r = load <4 x float>, ptr %p, align 4
   ret <4 x float> %r
@@ -110,12 +76,6 @@ entry:
 }
 
 define <8 x float> @test_l_v8float(ptr %p) #0 {
-; CHECK-LABEL: test_l_v8float:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    li 4, 16
-; CHECK-NEXT:    lxvw4x 34, 0, 3
-; CHECK-NEXT:    lxvw4x 35, 3, 4
-; CHECK-NEXT:    blr
 entry:
   %r = load <8 x float>, ptr %p, align 4
   ret <8 x float> %r
diff --git a/llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll b/llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll
index 2d3c5180664c2f..6f8b9c397124c4 100644
--- a/llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll
+++ b/llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll
@@ -13,5 +13,5 @@ entry:
 ; CHECK:     v4i32,ch = llvm.ppc.altivec.lvx{{.*}}<(load (s248) from %ir.p - 15, align 1)>
 }
 
-attributes #0 = { nounwind "target-cpu"="pwr6" }
+attributes #0 = { nounwind "target-cpu"="pwr7" }
 
diff --git a/llvm/test/CodeGen/PowerPC/unaligned.ll b/llvm/test/CodeGen/PowerPC/unaligned.ll
index 4d3c1487a69b98..481ed7230a2a5b 100644
--- a/llvm/test/CodeGen/PowerPC/unaligned.ll
+++ b/llvm/test/CodeGen/PowerPC/unaligned.ll
@@ -123,8 +123,12 @@ define void @foo6(ptr %p, ptr %r) nounwind {
 ;
 ; CHECK-VSX-LABEL: foo6:
 ; CHECK-VSX:       # %bb.0: # %entry
-; CHECK-VSX-NEXT:    lxvw4x 0, 0, 3
-; CHECK-VSX-NEXT:    stxvw4x 0, 0, 4
+; CHECK-VSX-NEXT:    li 5, 15
+; CHECK-VSX-NEXT:    lvsl 3, 0, 3
+; CHECK-VSX-NEXT:    lvx 2, 3, 5
+; CHECK-VSX-NEXT:    lvx 4, 0, 3
+; CHECK-VSX-NEXT:    vperm 2, 4, 2, 3
+; CHECK-VSX-NEXT:    stxvw4x 34, 0, 4
 ; CHECK-VSX-NEXT:    blr
 ; For VSX on P7, unaligned loads and stores are preferable to aligned
 ; stack slots, but lvsl/vperm is better still.  (On P8 lxvw4x is preferable.)
diff --git a/llvm/test/CodeGen/PowerPC/vsx.ll b/llvm/test/CodeGen/PowerPC/vsx.ll
index 88ff02d42108fc..32cbfd6d810acc 100644
--- a/llvm/test/CodeGen/PowerPC/vsx.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx.ll
@@ -1079,17 +1079,29 @@ define void @test33(ptr %a, <4 x float> %b) {
 define <4 x float> @test32u(ptr %a) {
 ; CHECK-LABEL: test32u:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lxvw4x v2, 0, r3
+; CHECK-NEXT:    li r4, 15
+; CHECK-NEXT:    lvsl v3, 0, r3
+; CHECK-NEXT:    lvx v2, r3, r4
+; CHECK-NEXT:    lvx v4, 0, r3
+; CHECK-NEXT:    vperm v2, v4, v2, v3
 ; CHECK-NEXT:    blr
 ;
 ; CHECK-REG-LABEL: test32u:
 ; CHECK-REG:       # %bb.0:
-; CHECK-REG-NEXT:    lxvw4x v2, 0, r3
+; CHECK-REG-NEXT:    li r4, 15
+; CHECK-REG-NEXT:    lvsl v3, 0, r3
+; CHECK-REG-NEXT:    lvx v2, r3, r4
+; CHECK-REG-NEXT:    lvx v4, 0, r3
+; CHECK-REG-NEXT:    vperm v2, v4, v2, v3
 ; CHECK-REG-NEXT:    blr
 ;
 ; CHECK-FISL-LABEL: test32u:
 ; CHECK-FISL:       # %bb.0:
-; CHECK-FISL-NEXT:    lxvw4x v2, 0, r3
+; CHECK-FISL-NEXT:    li r4, 15
+; CHECK-FISL-NEXT:    lvx v3, r3, r4
+; CHECK-FISL-NEXT:    lvsl v4, 0, r3
+; CHECK-FISL-NEXT:    lvx v2, 0, r3
+; CHECK-FISL-NEXT:    vperm v2, v2, v3, v4
 ; CHECK-FISL-NEXT:    blr
 ;
 ; CHECK-LE-LABEL: test32u:


