[flang-commits] [flang] 246b57c - Fix tests in flang/test/Lower/PowerPC after splat change.

Paul Walker via flang-commits <flang-commits at lists.llvm.org>
Wed Nov 6 05:31:20 PST 2024


Author: Paul Walker
Date: 2024-11-06T13:30:15Z
New Revision: 246b57cb2086b22ad8b41051c77e86ef478053a1

URL: https://github.com/llvm/llvm-project/commit/246b57cb2086b22ad8b41051c77e86ef478053a1
DIFF: https://github.com/llvm/llvm-project/commit/246b57cb2086b22ad8b41051c77e86ef478053a1.diff

LOG: Fix tests in flang/test/Lower/PowerPC after splat change.

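Background on the CHECK-line updates: LLVM's IR printer now emits uniform constant vectors using the "splat" shorthand instead of listing every element, so the FileCheck patterns below had to be rewritten to match the new form. A minimal LLVM IR sketch of the syntax change, assuming a recent LLVM that prints and parses the splat shorthand; the function name @vnot is illustrative only and not part of the commit:

    define <4 x i32> @vnot(<4 x i32> %v) {
      ; Older LLVM printed the all-ones vector constant element by element:
      ;   %r = xor <4 x i32> %v, <i32 -1, i32 -1, i32 -1, i32 -1>
      ; Newer LLVM prints (and parses) the same constant as a splat:
      %r = xor <4 x i32> %v, splat (i32 -1)
      ret <4 x i32> %r
    }
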
Added: 
    

Modified: 
    flang/test/Lower/PowerPC/ppc-vec-cmp.f90
    flang/test/Lower/PowerPC/ppc-vec-convert.f90
    flang/test/Lower/PowerPC/ppc-vec-perm.f90
    flang/test/Lower/PowerPC/ppc-vec-sel.f90
    flang/test/Lower/PowerPC/ppc-vec-shift.f90
    flang/test/Lower/PowerPC/ppc-vec-splat.f90

Removed: 
    


################################################################################
diff --git a/flang/test/Lower/PowerPC/ppc-vec-cmp.f90 b/flang/test/Lower/PowerPC/ppc-vec-cmp.f90
index 2fbef7a70122fb..e8dba78c34b542 100644
--- a/flang/test/Lower/PowerPC/ppc-vec-cmp.f90
+++ b/flang/test/Lower/PowerPC/ppc-vec-cmp.f90
@@ -1,4 +1,5 @@
-! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
 ! REQUIRES: target=powerpc{{.*}}
 
 !----------------------
@@ -14,7 +15,7 @@ subroutine vec_cmpge_test_i8(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %[[arg2]], <2 x i64> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], <i64 -1, i64 -1>
+! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], splat (i64 -1)
 end subroutine vec_cmpge_test_i8
 
 ! CHECK-LABEL: vec_cmpge_test_i4
@@ -26,7 +27,7 @@ subroutine vec_cmpge_test_i4(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vcmpgtsw(<4 x i32> %[[arg2]], <4 x i32> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], <i32 -1, i32 -1, i32 -1, i32 -1>
+! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], splat (i32 -1)
 end subroutine vec_cmpge_test_i4
 
 ! CHECK-LABEL: vec_cmpge_test_i2
@@ -38,7 +39,7 @@ subroutine vec_cmpge_test_i2(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <8 x i16> @llvm.ppc.altivec.vcmpgtsh(<8 x i16> %[[arg2]], <8 x i16> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], splat (i16 -1)
 end subroutine vec_cmpge_test_i2
 
 ! CHECK-LABEL: vec_cmpge_test_i1
@@ -50,7 +51,7 @@ subroutine vec_cmpge_test_i1(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <16 x i8> @llvm.ppc.altivec.vcmpgtsb(<16 x i8> %[[arg2]], <16 x i8> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], splat (i8 -1)
 end subroutine vec_cmpge_test_i1
 
 ! CHECK-LABEL: vec_cmpge_test_u8
@@ -62,7 +63,7 @@ subroutine vec_cmpge_test_u8(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %[[arg2]], <2 x i64> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], <i64 -1, i64 -1>
+! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], splat (i64 -1)
 end subroutine vec_cmpge_test_u8
 
 ! CHECK-LABEL: vec_cmpge_test_u4
@@ -74,7 +75,7 @@ subroutine vec_cmpge_test_u4(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vcmpgtuw(<4 x i32> %[[arg2]], <4 x i32> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], <i32 -1, i32 -1, i32 -1, i32 -1>
+! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], splat (i32 -1)
 end subroutine vec_cmpge_test_u4
 
 ! CHECK-LABEL: vec_cmpge_test_u2
@@ -86,7 +87,7 @@ subroutine vec_cmpge_test_u2(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <8 x i16> @llvm.ppc.altivec.vcmpgtuh(<8 x i16> %[[arg2]], <8 x i16> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], splat (i16 -1)
 end subroutine vec_cmpge_test_u2
 
 ! CHECK-LABEL: vec_cmpge_test_u1
@@ -98,7 +99,7 @@ subroutine vec_cmpge_test_u1(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <16 x i8> @llvm.ppc.altivec.vcmpgtub(<16 x i8> %[[arg2]], <16 x i8> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], splat (i8 -1)
 end subroutine vec_cmpge_test_u1
 
 subroutine vec_cmpge_test_r4(arg1, arg2)
@@ -248,7 +249,7 @@ subroutine vec_cmple_test_i8(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %[[arg1]], <2 x i64> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], <i64 -1, i64 -1>
+! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], splat (i64 -1)
 end subroutine vec_cmple_test_i8
 
 ! CHECK-LABEL: vec_cmple_test_i4
@@ -260,7 +261,7 @@ subroutine vec_cmple_test_i4(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vcmpgtsw(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], <i32 -1, i32 -1, i32 -1, i32 -1>
+! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], splat (i32 -1)
 end subroutine vec_cmple_test_i4
 
 ! CHECK-LABEL: vec_cmple_test_i2
@@ -272,7 +273,7 @@ subroutine vec_cmple_test_i2(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <8 x i16> @llvm.ppc.altivec.vcmpgtsh(<8 x i16> %[[arg1]], <8 x i16> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], splat (i16 -1)
 end subroutine vec_cmple_test_i2
 
 ! CHECK-LABEL: vec_cmple_test_i1
@@ -284,7 +285,7 @@ subroutine vec_cmple_test_i1(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <16 x i8> @llvm.ppc.altivec.vcmpgtsb(<16 x i8> %[[arg1]], <16 x i8> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], splat (i8 -1)
 end subroutine vec_cmple_test_i1
 
 ! CHECK-LABEL: vec_cmple_test_u8
@@ -296,7 +297,7 @@ subroutine vec_cmple_test_u8(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %[[arg1]], <2 x i64> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], <i64 -1, i64 -1>
+! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], splat (i64 -1)
 end subroutine vec_cmple_test_u8
 
 ! CHECK-LABEL: vec_cmple_test_u4
@@ -308,7 +309,7 @@ subroutine vec_cmple_test_u4(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vcmpgtuw(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], <i32 -1, i32 -1, i32 -1, i32 -1>
+! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], splat (i32 -1)
 end subroutine vec_cmple_test_u4
 
 ! CHECK-LABEL: vec_cmple_test_u2
@@ -320,7 +321,7 @@ subroutine vec_cmple_test_u2(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <8 x i16> @llvm.ppc.altivec.vcmpgtuh(<8 x i16> %[[arg1]], <8 x i16> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], splat (i16 -1)
 end subroutine vec_cmple_test_u2
 
 ! CHECK-LABEL: vec_cmple_test_u1
@@ -332,7 +333,7 @@ subroutine vec_cmple_test_u1(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <16 x i8> @llvm.ppc.altivec.vcmpgtub(<16 x i8> %[[arg1]], <16 x i8> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], splat (i8 -1)
 end subroutine vec_cmple_test_u1
 
 ! CHECK-LABEL: vec_cmple_test_r4

diff --git a/flang/test/Lower/PowerPC/ppc-vec-convert.f90 b/flang/test/Lower/PowerPC/ppc-vec-convert.f90
index 0f449a86dbe96f..47763c660e8e81 100644
--- a/flang/test/Lower/PowerPC/ppc-vec-convert.f90
+++ b/flang/test/Lower/PowerPC/ppc-vec-convert.f90
@@ -57,7 +57,7 @@ subroutine vec_ctf_test_i8i1(arg1)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[carg:.*]] = sitofp <2 x i64> %[[arg1]] to <2 x double>
-! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], <double 1.250000e-01, double 1.250000e-01>
+! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], splat (double 1.250000e-01)
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_ctf_test_i8i1
 
@@ -69,7 +69,7 @@ subroutine vec_ctf_test_i8i2(arg1)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[carg:.*]] = sitofp <2 x i64> %[[arg1]] to <2 x double>
-! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], <double 1.250000e-01, double 1.250000e-01>
+! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], splat (double 1.250000e-01)
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_ctf_test_i8i2
 
@@ -81,7 +81,7 @@ subroutine vec_ctf_test_i8i4(arg1)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[carg:.*]] = sitofp <2 x i64> %[[arg1]] to <2 x double>
-! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], <double 1.250000e-01, double 1.250000e-01>
+! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], splat (double 1.250000e-01)
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_ctf_test_i8i4
 
@@ -93,7 +93,7 @@ subroutine vec_ctf_test_i8i8(arg1)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[carg:.*]] = sitofp <2 x i64> %[[arg1]] to <2 x double>
-! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], <double 1.250000e-01, double 1.250000e-01>
+! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], splat (double 1.250000e-01)
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_ctf_test_i8i8
 
@@ -149,7 +149,7 @@ subroutine vec_ctf_test_u8i1(arg1)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[carg:.*]] = uitofp <2 x i64> %[[arg1]] to <2 x double>
-! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], <double 1.250000e-01, double 1.250000e-01>
+! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], splat (double 1.250000e-01)
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_ctf_test_u8i1
 
@@ -161,7 +161,7 @@ subroutine vec_ctf_test_u8i2(arg1)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[carg:.*]] = uitofp <2 x i64> %[[arg1]] to <2 x double>
-! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], <double 1.250000e-01, double 1.250000e-01>
+! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], splat (double 1.250000e-01)
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_ctf_test_u8i2
 
@@ -173,7 +173,7 @@ subroutine vec_ctf_test_u8i4(arg1)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[carg:.*]] = uitofp <2 x i64> %[[arg1]] to <2 x double>
-! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], <double 1.250000e-01, double 1.250000e-01>
+! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], splat (double 1.250000e-01)
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_ctf_test_u8i4
 
@@ -185,7 +185,7 @@ subroutine vec_ctf_test_u8i8(arg1)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[carg:.*]] = uitofp <2 x i64> %[[arg1]] to <2 x double>
-! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], <double 1.250000e-01, double 1.250000e-01>
+! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], splat (double 1.250000e-01)
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_ctf_test_u8i8
 

diff --git a/flang/test/Lower/PowerPC/ppc-vec-perm.f90 b/flang/test/Lower/PowerPC/ppc-vec-perm.f90
index 5353e9c10db7dd..20dc92bdf80665 100644
--- a/flang/test/Lower/PowerPC/ppc-vec-perm.f90
+++ b/flang/test/Lower/PowerPC/ppc-vec-perm.f90
@@ -13,7 +13,7 @@ subroutine vec_perm_test_i1(arg1, arg2, arg3)
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[barg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
 ! LLVMIR: %[[barg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
-! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
 ! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
 ! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <16 x i8>
@@ -31,7 +31,7 @@ subroutine vec_perm_test_i2(arg1, arg2, arg3)
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
 ! LLVMIR: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
-! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
 ! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
 ! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <8 x i16>
@@ -47,7 +47,7 @@ subroutine vec_perm_test_i4(arg1, arg2, arg3)
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
-! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
 ! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[arg2]], <4 x i32> %[[arg1]], <16 x i8> %[[xor]])
 ! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: store <4 x i32> %[[call]], ptr %{{.*}}, align 16
@@ -64,7 +64,7 @@ subroutine vec_perm_test_i8(arg1, arg2, arg3)
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[barg1:.*]] = bitcast <2 x i64> %[[arg1]] to <4 x i32>
 ! LLVMIR: %[[barg2:.*]] = bitcast <2 x i64> %[[arg2]] to <4 x i32>
-! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
 ! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
 ! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <2 x i64>
@@ -82,7 +82,7 @@ subroutine vec_perm_test_u1(arg1, arg2, arg3)
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[barg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
 ! LLVMIR: %[[barg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
-! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
 ! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
 ! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <16 x i8>
@@ -100,7 +100,7 @@ subroutine vec_perm_test_u2(arg1, arg2, arg3)
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
 ! LLVMIR: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
-! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
 ! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
 ! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <8 x i16>
@@ -116,7 +116,7 @@ subroutine vec_perm_test_u4(arg1, arg2, arg3)
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
-! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
 ! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[arg2]], <4 x i32> %[[arg1]], <16 x i8> %[[xor]])
 ! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: store <4 x i32> %[[call]], ptr %{{.*}}, align 16
@@ -133,7 +133,7 @@ subroutine vec_perm_test_u8(arg1, arg2, arg3)
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[barg1:.*]] = bitcast <2 x i64> %[[arg1]] to <4 x i32>
 ! LLVMIR: %[[barg2:.*]] = bitcast <2 x i64> %[[arg2]] to <4 x i32>
-! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
 ! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
 ! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <2 x i64>
@@ -151,7 +151,7 @@ subroutine vec_perm_test_r4(arg1, arg2, arg3)
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[barg1:.*]] = bitcast <4 x float> %[[arg1]] to <4 x i32>
 ! LLVMIR: %[[barg2:.*]] = bitcast <4 x float> %[[arg2]] to <4 x i32>
-! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
 ! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
 ! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <4 x float>
@@ -169,7 +169,7 @@ subroutine vec_perm_test_r8(arg1, arg2, arg3)
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[barg1:.*]] = bitcast <2 x double> %[[arg1]] to <4 x i32>
 ! LLVMIR: %[[barg2:.*]] = bitcast <2 x double> %[[arg2]] to <4 x i32>
-! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
 ! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
 ! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <2 x double>

diff --git a/flang/test/Lower/PowerPC/ppc-vec-sel.f90 b/flang/test/Lower/PowerPC/ppc-vec-sel.f90
index c3a7288b6b4d05..c3de8ba9c1444b 100644
--- a/flang/test/Lower/PowerPC/ppc-vec-sel.f90
+++ b/flang/test/Lower/PowerPC/ppc-vec-sel.f90
@@ -1,4 +1,5 @@
-! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
 ! REQUIRES: target=powerpc{{.*}}
 
 !----------------------
@@ -14,7 +15,7 @@ subroutine vec_sel_testi1(arg1, arg2, arg3)
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
-! LLVMIR:  %[[comp:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR:  %[[comp:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
 ! LLVMIR:  %[[and1:.*]] = and <16 x i8> %[[arg1]], %[[comp]]
 ! LLVMIR:  %[[and2:.*]] = and <16 x i8> %[[arg2]], %[[arg3]]
 ! LLVMIR:  %{{[0-9]+}} = or <16 x i8> %[[and1]], %[[and2]]
@@ -32,7 +33,7 @@ subroutine vec_sel_testi2(arg1, arg2, arg3)
 ! LLVMIR: %[[bc1:.*]] = bitcast <8 x i16> %5 to <16 x i8>
 ! LLVMIR: %[[bc2:.*]] = bitcast <8 x i16> %6 to <16 x i8>
 ! LLVMIR: %[[bc3:.*]] = bitcast <8 x i16> %7 to <16 x i8>
-! LLVMIR: %[[comp:.*]] = xor <16 x i8> %[[bc3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %[[comp:.*]] = xor <16 x i8> %[[bc3]], splat (i8 -1)
 ! LLVMIR: %[[and1:.*]] = and <16 x i8> %[[bc1]], %[[comp]]
 ! LLVMIR: %[[and2:.*]] = and <16 x i8> %[[bc2]], %[[bc3]]
 ! LLVMIR: %[[or:.*]] = or <16 x i8> %[[and1]], %[[and2]]
@@ -51,7 +52,7 @@ subroutine vec_sel_testi4(arg1, arg2, arg3)
 ! LLVMIR: %[[bc1:.*]] = bitcast <4 x i32> %5 to <16 x i8>
 ! LLVMIR: %[[bc2:.*]] = bitcast <4 x i32> %6 to <16 x i8>
 ! LLVMIR: %[[bc3:.*]] = bitcast <4 x i32> %7 to <16 x i8>
-! LLVMIR: %[[comp:.*]] = xor <16 x i8> %[[bc3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %[[comp:.*]] = xor <16 x i8> %[[bc3]], splat (i8 -1)
 ! LLVMIR: %[[and1:.*]] = and <16 x i8> %[[bc1]], %[[comp]]
 ! LLVMIR: %[[and2:.*]] = and <16 x i8> %[[bc2]], %[[bc3]]
 ! LLVMIR: %[[or:.*]] = or <16 x i8> %[[and1]], %[[and2]]
@@ -70,7 +71,7 @@ subroutine vec_sel_testi8(arg1, arg2, arg3)
 ! LLVMIR: %[[bc1:.*]] = bitcast <2 x i64> %5 to <16 x i8>
 ! LLVMIR: %[[bc2:.*]] = bitcast <2 x i64> %6 to <16 x i8>
 ! LLVMIR: %[[bc3:.*]] = bitcast <2 x i64> %7 to <16 x i8>
-! LLVMIR: %[[comp:.*]] = xor <16 x i8> %[[bc3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %[[comp:.*]] = xor <16 x i8> %[[bc3]], splat (i8 -1)
 ! LLVMIR: %[[and1:.*]] = and <16 x i8> %[[bc1]], %[[comp]]
 ! LLVMIR: %[[and2:.*]] = and <16 x i8> %[[bc2]], %[[bc3]]
 ! LLVMIR: %[[or:.*]] = or <16 x i8> %[[and1]], %[[and2]]
@@ -86,7 +87,7 @@ subroutine vec_sel_testu1(arg1, arg2, arg3)
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
-! LLVMIR:  %[[comp:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR:  %[[comp:.*]] = xor <16 x i8> %[[arg3]], splat (i8 -1)
 ! LLVMIR:  %[[and1:.*]] = and <16 x i8> %[[arg1]], %[[comp]]
 ! LLVMIR:  %[[and2:.*]] = and <16 x i8> %[[arg2]], %[[arg3]]
 ! LLVMIR:  %{{[0-9]+}} = or <16 x i8> %[[and1]], %[[and2]]
@@ -104,7 +105,7 @@ subroutine vec_sel_testu2(arg1, arg2, arg3)
 ! LLVMIR: %[[bc1:.*]] = bitcast <8 x i16> %5 to <16 x i8>
 ! LLVMIR: %[[bc2:.*]] = bitcast <8 x i16> %6 to <16 x i8>
 ! LLVMIR: %[[bc3:.*]] = bitcast <8 x i16> %7 to <16 x i8>
-! LLVMIR: %[[comp:.*]] = xor <16 x i8> %[[bc3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %[[comp:.*]] = xor <16 x i8> %[[bc3]], splat (i8 -1)
 ! LLVMIR: %[[and1:.*]] = and <16 x i8> %[[bc1]], %[[comp]]
 ! LLVMIR: %[[and2:.*]] = and <16 x i8> %[[bc2]], %[[bc3]]
 ! LLVMIR: %[[or:.*]] = or <16 x i8> %[[and1]], %[[and2]]
@@ -123,7 +124,7 @@ subroutine vec_sel_testu4(arg1, arg2, arg3)
 ! LLVMIR: %[[bc1:.*]] = bitcast <4 x i32> %5 to <16 x i8>
 ! LLVMIR: %[[bc2:.*]] = bitcast <4 x i32> %6 to <16 x i8>
 ! LLVMIR: %[[bc3:.*]] = bitcast <4 x i32> %7 to <16 x i8>
-! LLVMIR: %[[comp:.*]] = xor <16 x i8> %[[bc3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %[[comp:.*]] = xor <16 x i8> %[[bc3]], splat (i8 -1)
 ! LLVMIR: %[[and1:.*]] = and <16 x i8> %[[bc1]], %[[comp]]
 ! LLVMIR: %[[and2:.*]] = and <16 x i8> %[[bc2]], %[[bc3]]
 ! LLVMIR: %[[or:.*]] = or <16 x i8> %[[and1]], %[[and2]]
@@ -143,7 +144,7 @@ subroutine vec_sel_testu8(arg1, arg2, arg3)
 ! LLVMIR: %[[bc1:.*]] = bitcast <2 x i64> %5 to <16 x i8>
 ! LLVMIR: %[[bc2:.*]] = bitcast <2 x i64> %6 to <16 x i8>
 ! LLVMIR: %[[bc3:.*]] = bitcast <2 x i64> %7 to <16 x i8>
-! LLVMIR: %[[comp:.*]] = xor <16 x i8> %[[bc3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %[[comp:.*]] = xor <16 x i8> %[[bc3]], splat (i8 -1)
 ! LLVMIR: %[[and1:.*]] = and <16 x i8> %[[bc1]], %[[comp]]
 ! LLVMIR: %[[and2:.*]] = and <16 x i8> %[[bc2]], %[[bc3]]
 ! LLVMIR: %[[or:.*]] = or <16 x i8> %[[and1]], %[[and2]]
@@ -162,7 +163,7 @@ subroutine vec_sel_testr4(arg1, arg2, arg3)
 ! LLVMIR: %[[bc1:.*]] = bitcast <4 x float> %5 to <16 x i8>
 ! LLVMIR: %[[bc2:.*]] = bitcast <4 x float> %6 to <16 x i8>
 ! LLVMIR: %[[bc3:.*]] = bitcast <4 x i32> %7 to <16 x i8>
-! LLVMIR: %[[comp:.*]] = xor <16 x i8> %[[bc3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %[[comp:.*]] = xor <16 x i8> %[[bc3]], splat (i8 -1)
 ! LLVMIR: %[[and1:.*]] = and <16 x i8> %[[bc1]], %[[comp]]
 ! LLVMIR: %[[and2:.*]] = and <16 x i8> %[[bc2]], %[[bc3]]
 ! LLVMIR: %[[or:.*]] = or <16 x i8> %[[and1]], %[[and2]]
@@ -181,7 +182,7 @@ subroutine vec_sel_testr8(arg1, arg2, arg3)
 ! LLVMIR: %[[bc1:.*]] = bitcast <2 x double> %5 to <16 x i8>
 ! LLVMIR: %[[bc2:.*]] = bitcast <2 x double> %6 to <16 x i8>
 ! LLVMIR: %[[bc3:.*]] = bitcast <2 x i64> %7 to <16 x i8>
-! LLVMIR: %[[comp:.*]] = xor <16 x i8> %[[bc3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %[[comp:.*]] = xor <16 x i8> %[[bc3]], splat (i8 -1)
 ! LLVMIR: %[[and1:.*]] = and <16 x i8> %[[bc1]], %[[comp]]
 ! LLVMIR: %[[and2:.*]] = and <16 x i8> %[[bc2]], %[[bc3]]
 ! LLVMIR: %[[or:.*]] = or <16 x i8> %[[and1]], %[[and2]]

diff --git a/flang/test/Lower/PowerPC/ppc-vec-shift.f90 b/flang/test/Lower/PowerPC/ppc-vec-shift.f90
index a20f086c769adc..bbcc9c76080b9a 100644
--- a/flang/test/Lower/PowerPC/ppc-vec-shift.f90
+++ b/flang/test/Lower/PowerPC/ppc-vec-shift.f90
@@ -1,4 +1,5 @@
-! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
 ! REQUIRES: target=powerpc{{.*}}
 
 !----------------------
@@ -13,7 +14,7 @@ subroutine vec_sl_i1(arg1, arg2)
 
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
-! LLVMIR: %[[msk:.*]] = urem <16 x i8> %[[arg2]], <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+! LLVMIR: %[[msk:.*]] = urem <16 x i8> %[[arg2]], splat (i8 8)
 ! LLVMIR: %7 = shl <16 x i8> %[[arg1]], %[[msk]]
 end subroutine vec_sl_i1
 
@@ -25,7 +26,7 @@ subroutine vec_sl_i2(arg1, arg2)
 
 ! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
-! LLVMIR: %[[msk:.*]] = urem <8 x i16> %[[arg2]], <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+! LLVMIR: %[[msk:.*]] = urem <8 x i16> %[[arg2]], splat (i16 16)
 ! LLVMIR: %7 = shl <8 x i16> %[[arg1]], %[[msk]]
 end subroutine vec_sl_i2
 
@@ -37,7 +38,7 @@ subroutine vec_sl_i4(arg1, arg2)
 
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
-! LLVMIR: %[[msk:.*]] = urem <4 x i32> %[[arg2]], <i32 32, i32 32, i32 32, i32 32>
+! LLVMIR: %[[msk:.*]] = urem <4 x i32> %[[arg2]], splat (i32 32)
 ! LLVMIR: %7 = shl <4 x i32> %[[arg1]], %[[msk]]
 end subroutine vec_sl_i4
 
@@ -49,7 +50,7 @@ subroutine vec_sl_i8(arg1, arg2)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
-! LLVMIR: %[[msk:.*]] = urem <2 x i64> %[[arg2]], <i64 64, i64 64>
+! LLVMIR: %[[msk:.*]] = urem <2 x i64> %[[arg2]], splat (i64 64)
 ! LLVMIR: %7 = shl <2 x i64> %[[arg1]], %[[msk]]
 end subroutine vec_sl_i8
 
@@ -61,7 +62,7 @@ subroutine vec_sl_u1(arg1, arg2)
 
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
-! LLVMIR: %[[msk:.*]] = urem <16 x i8> %[[arg2]], <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+! LLVMIR: %[[msk:.*]] = urem <16 x i8> %[[arg2]], splat (i8 8)
 ! LLVMIR: %7 = shl <16 x i8> %[[arg1]], %[[msk]]
 end subroutine vec_sl_u1
 
@@ -73,7 +74,7 @@ subroutine vec_sl_u2(arg1, arg2)
 
 ! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
-! LLVMIR: %[[msk:.*]] = urem <8 x i16> %[[arg2]], <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+! LLVMIR: %[[msk:.*]] = urem <8 x i16> %[[arg2]], splat (i16 16)
 ! LLVMIR: %7 = shl <8 x i16> %[[arg1]], %[[msk]]
 end subroutine vec_sl_u2
 
@@ -85,7 +86,7 @@ subroutine vec_sl_u4(arg1, arg2)
 
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
-! LLVMIR: %[[msk:.*]] = urem <4 x i32> %[[arg2]], <i32 32, i32 32, i32 32, i32 32>
+! LLVMIR: %[[msk:.*]] = urem <4 x i32> %[[arg2]], splat (i32 32)
 ! LLVMIR: %7 = shl <4 x i32> %[[arg1]], %[[msk]]
 end subroutine vec_sl_u4
 
@@ -97,7 +98,7 @@ subroutine vec_sl_u8(arg1, arg2)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
-! LLVMIR: %[[msk:.*]] = urem <2 x i64> %[[arg2]], <i64 64, i64 64>
+! LLVMIR: %[[msk:.*]] = urem <2 x i64> %[[arg2]], splat (i64 64)
 ! LLVMIR: %{{[0-9]+}} = shl <2 x i64> %[[arg1]], %[[msk]]
 end subroutine vec_sl_u8
 
@@ -542,7 +543,7 @@ subroutine vec_sr_i1(arg1, arg2)
 
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
-! LLVMIR: %[[msk:.*]] = urem <16 x i8> %[[arg2]], <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+! LLVMIR: %[[msk:.*]] = urem <16 x i8> %[[arg2]], splat (i8 8)
 ! LLVMIR: %7 = lshr <16 x i8> %[[arg1]], %[[msk]]
 end subroutine vec_sr_i1
 
@@ -554,7 +555,7 @@ subroutine vec_sr_i2(arg1, arg2)
 
 ! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
-! LLVMIR: %[[msk:.*]] = urem <8 x i16> %[[arg2]], <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+! LLVMIR: %[[msk:.*]] = urem <8 x i16> %[[arg2]], splat (i16 16)
 ! LLVMIR: %7 = lshr <8 x i16> %[[arg1]], %[[msk]]
 end subroutine vec_sr_i2
 
@@ -566,7 +567,7 @@ subroutine vec_sr_i4(arg1, arg2)
 
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
-! LLVMIR: %[[msk:.*]] = urem <4 x i32> %[[arg2]], <i32 32, i32 32, i32 32, i32 32>
+! LLVMIR: %[[msk:.*]] = urem <4 x i32> %[[arg2]], splat (i32 32)
 ! LLVMIR: %7 = lshr <4 x i32> %[[arg1]], %[[msk]]
 end subroutine vec_sr_i4
 
@@ -578,7 +579,7 @@ subroutine vec_sr_i8(arg1, arg2)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
-! LLVMIR: %[[msk:.*]] = urem <2 x i64> %[[arg2]], <i64 64, i64 64>
+! LLVMIR: %[[msk:.*]] = urem <2 x i64> %[[arg2]], splat (i64 64)
 ! LLVMIR: %7 = lshr <2 x i64> %[[arg1]], %[[msk]]
 end subroutine vec_sr_i8
 
@@ -590,7 +591,7 @@ subroutine vec_sr_u1(arg1, arg2)
 
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
-! LLVMIR: %[[msk:.*]] = urem <16 x i8> %[[arg2]], <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+! LLVMIR: %[[msk:.*]] = urem <16 x i8> %[[arg2]], splat (i8 8)
 ! LLVMIR: %7 = lshr <16 x i8> %[[arg1]], %[[msk]]
 end subroutine vec_sr_u1
 
@@ -602,7 +603,7 @@ subroutine vec_sr_u2(arg1, arg2)
 
 ! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
-! LLVMIR: %[[msk:.*]] = urem <8 x i16> %[[arg2]], <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+! LLVMIR: %[[msk:.*]] = urem <8 x i16> %[[arg2]], splat (i16 16)
 ! LLVMIR: %7 = lshr <8 x i16> %[[arg1]], %[[msk]]
 end subroutine vec_sr_u2
 
@@ -614,7 +615,7 @@ subroutine vec_sr_u4(arg1, arg2)
 
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
-! LLVMIR: %[[msk:.*]] = urem <4 x i32> %[[arg2]], <i32 32, i32 32, i32 32, i32 32>
+! LLVMIR: %[[msk:.*]] = urem <4 x i32> %[[arg2]], splat (i32 32)
 ! LLVMIR: %7 = lshr <4 x i32> %[[arg1]], %[[msk]]
 end subroutine vec_sr_u4
 
@@ -626,7 +627,7 @@ subroutine vec_sr_u8(arg1, arg2)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
-! LLVMIR: %[[msk:.*]] = urem <2 x i64> %[[arg2]], <i64 64, i64 64>
+! LLVMIR: %[[msk:.*]] = urem <2 x i64> %[[arg2]], splat (i64 64)
 ! LLVMIR: %7 = lshr <2 x i64> %[[arg1]], %[[msk]]
 end subroutine vec_sr_u8
 

diff --git a/flang/test/Lower/PowerPC/ppc-vec-splat.f90 b/flang/test/Lower/PowerPC/ppc-vec-splat.f90
index 9d4f7e3d98a3ac..17558926afd5f2 100644
--- a/flang/test/Lower/PowerPC/ppc-vec-splat.f90
+++ b/flang/test/Lower/PowerPC/ppc-vec-splat.f90
@@ -607,7 +607,7 @@ subroutine vec_splat_s32testi8()
   vector(integer(4)) :: y
   y = vec_splat_s32(7_1)
 
-! LLVMIR: store <4 x i32> <i32 7, i32 7, i32 7, i32 7>, ptr %{{[0-9]}}, align 16
+! LLVMIR: store <4 x i32> splat (i32 7), ptr %{{[0-9]}}, align 16
 end subroutine vec_splat_s32testi8
 
 ! CHECK-LABEL: vec_splat_s32testi16
@@ -615,7 +615,7 @@ subroutine vec_splat_s32testi16()
   vector(integer(4)) :: y
   y = vec_splat_s32(7_2)
 
-! LLVMIR: store <4 x i32> <i32 7, i32 7, i32 7, i32 7>, ptr %{{[0-9]}}, align 16
+! LLVMIR: store <4 x i32> splat (i32 7), ptr %{{[0-9]}}, align 16
 end subroutine vec_splat_s32testi16
 
 ! CHECK-LABEL: vec_splat_s32testi32
@@ -623,7 +623,7 @@ subroutine vec_splat_s32testi32()
   vector(integer(4)) :: y
   y = vec_splat_s32(7_4)
 
-! LLVMIR: store <4 x i32> <i32 7, i32 7, i32 7, i32 7>, ptr %{{[0-9]}}, align 16
+! LLVMIR: store <4 x i32> splat (i32 7), ptr %{{[0-9]}}, align 16
 end subroutine vec_splat_s32testi32
 
 ! CHECK-LABEL: vec_splat_s32testi64
@@ -631,5 +631,5 @@ subroutine vec_splat_s32testi64()
   vector(integer(4)) :: y
   y = vec_splat_s32(7_8)
 
-! LLVMIR: store <4 x i32> <i32 7, i32 7, i32 7, i32 7>, ptr %{{[0-9]}}, align 16
+! LLVMIR: store <4 x i32> splat (i32 7), ptr %{{[0-9]}}, align 16
 end subroutine vec_splat_s32testi64