[flang-commits] [flang] [Flang] fix ppc-vec intrinsics testcases on AIX (NFC) (PR #74347)

via flang-commits flang-commits at lists.llvm.org
Thu Dec 14 11:00:38 PST 2023


https://github.com/madanial0 updated https://github.com/llvm/llvm-project/pull/74347

From 8c8e3b423b726dfd68432724ebe0ae9c6b1c46fd Mon Sep 17 00:00:00 2001
From: Mark Danial <madanial at dixon.rtp.raleigh.ibm.com>
Date: Fri, 1 Dec 2023 03:33:20 -0500
Subject: [PATCH] [Flang] fix ppc-vec intrinsics testcases on AIX (NFC)

---
 flang/test/Lower/PowerPC/ppc-vec-convert.f90 |  24 ++--
 flang/test/Lower/PowerPC/ppc-vec-extract.f90 | 100 +++++++++++----
 flang/test/Lower/PowerPC/ppc-vec-insert.f90  |  99 +++++++++++----
 flang/test/Lower/PowerPC/ppc-vec-load.f90    |  73 ++++++-----
 flang/test/Lower/PowerPC/ppc-vec-perm.f90    |  53 ++++----
 flang/test/Lower/PowerPC/ppc-vec-splat.f90   | 123 ++++++++++++-------
 6 files changed, 321 insertions(+), 151 deletions(-)
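
Each test now pins an explicit target triple and splits the FileCheck
prefixes three ways: LLVMIR for IR common to both byte orders, LLVMIR-LE
for the little-endian-only lowering (byte-reversal shuffles and mirrored
element indices), and LLVMIR-BE for the big-endian lowering exercised on
AIX. A minimal sketch of the pattern (hypothetical standalone test, not
part of this patch):

  ! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-LE" %s
  ! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-BE" %s
  ! REQUIRES: target=powerpc{{.*}}
  subroutine t(x, i)
    vector(integer(4)) :: x
    integer(4) :: i, r
    r = vec_extract(x, i)
  ! LLVMIR: %[[u:.*]] = urem i32 %{{.*}}, 4
  ! LLVMIR-BE: %[[s:.*]] = sub i32 3, %[[u]]
  ! LLVMIR-LE: extractelement <4 x i32> %{{.*}}, i32 %[[u]]
  ! LLVMIR-BE: extractelement <4 x i32> %{{.*}}, i32 %[[s]]
  end subroutine t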

diff --git a/flang/test/Lower/PowerPC/ppc-vec-convert.f90 b/flang/test/Lower/PowerPC/ppc-vec-convert.f90
index 14e247f83df67a..0f449a86dbe96f 100644
--- a/flang/test/Lower/PowerPC/ppc-vec-convert.f90
+++ b/flang/test/Lower/PowerPC/ppc-vec-convert.f90
@@ -1,4 +1,5 @@
-! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-LE" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-BE" %s
 ! REQUIRES: target=powerpc{{.*}}
 
 !---------
@@ -1316,10 +1317,11 @@ subroutine vec_cvf_test_r4r8(arg1)
 
 ! LLVMIR: %[[arg:.*]] = load <2 x double>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[call:.*]] = call contract <4 x float> @llvm.ppc.vsx.xvcvdpsp(<2 x double> %[[arg]])
-! LLVMIR: %[[b:.*]] = bitcast <4 x float> %[[call]] to <16 x i8>
-! LLVMIR: %[[sh:.*]] = shufflevector <16 x i8> %[[b]], <16 x i8> %[[b]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11>
-! LLVMIR: %[[r:.*]] = bitcast <16 x i8> %[[sh]] to <4 x float>
-! LLVMIR: store <4 x float> %[[r]], ptr %{{.*}}, align 16
+! LLVMIR-LE: %[[b:.*]] = bitcast <4 x float> %[[call]] to <16 x i8>
+! LLVMIR-LE: %[[sh:.*]] = shufflevector <16 x i8> %[[b]], <16 x i8> %[[b]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11>
+! LLVMIR-LE: %[[r:.*]] = bitcast <16 x i8> %[[sh]] to <4 x float>
+! LLVMIR-LE: store <4 x float> %[[r]], ptr %{{.*}}, align 16
+! LLVMIR-BE: store <4 x float> %[[call]], ptr %{{.*}}, align 16
 end subroutine vec_cvf_test_r4r8
 
 ! CHECK-LABEL: vec_cvf_test_r8r4
@@ -1329,10 +1331,12 @@ subroutine vec_cvf_test_r8r4(arg1)
   r = vec_cvf(arg1)
 
 ! LLVMIR: %[[arg:.*]] = load <4 x float>, ptr %{{.*}}, align 16
-! LLVMIR: %[[bfi:.*]] = bitcast <4 x float> %[[arg]] to <16 x i8>
-! LLVMIR: %[[sh:.*]] = shufflevector <16 x i8> %[[bfi]], <16 x i8> %[[bfi]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11>
-! LLVMIR: %[[bif:.*]] = bitcast <16 x i8> %[[sh]] to <4 x float>
-! LLVMIR: %[[r:.*]] = call contract <2 x double> @llvm.ppc.vsx.xvcvspdp(<4 x float> %[[bif]])
-! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
+! LLVMIR-LE: %[[bfi:.*]] = bitcast <4 x float> %[[arg]] to <16 x i8>
+! LLVMIR-LE: %[[sh:.*]] = shufflevector <16 x i8> %[[bfi]], <16 x i8> %[[bfi]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11>
+! LLVMIR-LE: %[[bif:.*]] = bitcast <16 x i8> %[[sh]] to <4 x float>
+! LLVMIR-LE: %[[r:.*]] = call contract <2 x double> @llvm.ppc.vsx.xvcvspdp(<4 x float> %[[bif]])
+! LLVMIR-LE: store <2 x double> %[[r]], ptr %{{.*}}, align 16
+! LLVMIR-BE: %[[r:.*]] = call contract <2 x double> @llvm.ppc.vsx.xvcvspdp(<4 x float> %[[arg]])
+! LLVMIR-BE: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_cvf_test_r8r4
 
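
The extract and insert tests below all follow one pattern: the runtime
index is first wrapped into range with urem, and on big-endian targets
the wrapped index u is additionally mirrored to n-1-u (n being the
element count), because element 0 sits at the most-significant end of
the register on BE. A hedged sketch of the BE sequence for a 4-element
vector (illustrative IR matching the checks below):

  %u = urem i32 %i, 4            ; wrap the index into [0,3]
  %s = sub i32 3, %u             ; mirror for big-endian element order
  %r = extractelement <4 x i32> %x, i32 %s

On little-endian the sub is absent and %u feeds extractelement directly.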
diff --git a/flang/test/Lower/PowerPC/ppc-vec-extract.f90 b/flang/test/Lower/PowerPC/ppc-vec-extract.f90
index 1930c8b79d837c..0f279347b6b75c 100644
--- a/flang/test/Lower/PowerPC/ppc-vec-extract.f90
+++ b/flang/test/Lower/PowerPC/ppc-vec-extract.f90
@@ -1,4 +1,5 @@
-! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-LE" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-BE" %s
 ! REQUIRES: target=powerpc{{.*}}
 
 !-------------
@@ -17,7 +18,9 @@ subroutine vec_extract_testf32(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
 ! LLVMIR: %[[u:.*]] = urem i8 %[[i1]], 4
-! LLVMIR: %[[r:.*]] = extractelement <4 x float> %[[x]], i8 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i8 3, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <4 x float> %[[x]], i8 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <4 x float> %[[x]], i8 %[[s]]
 ! LLVMIR: store float %[[r]], ptr %{{[0-9]}}, align 4
 
   r = vec_extract(x, i2)
@@ -25,7 +28,9 @@ subroutine vec_extract_testf32(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
 ! LLVMIR: %[[u:.*]] = urem i16 %[[i2]], 4
-! LLVMIR: %[[r:.*]] = extractelement <4 x float> %[[x]], i16 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i16 3, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <4 x float> %[[x]], i16 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <4 x float> %[[x]], i16 %[[s]]
 ! LLVMIR: store float %[[r]], ptr %{{[0-9]}}, align 4
 
   r = vec_extract(x, i4)
@@ -33,7 +38,9 @@ subroutine vec_extract_testf32(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
 ! LLVMIR: %[[u:.*]] = urem i32 %[[i4]], 4
-! LLVMIR: %[[r:.*]] = extractelement <4 x float> %[[x]], i32 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i32 3, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <4 x float> %[[x]], i32 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <4 x float> %[[x]], i32 %[[s]]
 ! LLVMIR: store float %[[r]], ptr %{{[0-9]}}, align 4
 
   r = vec_extract(x, i8)
@@ -41,7 +48,9 @@ subroutine vec_extract_testf32(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
 ! LLVMIR: %[[u:.*]] = urem i64 %[[i8]], 4
-! LLVMIR: %[[r:.*]] = extractelement <4 x float> %[[x]], i64 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i64 3, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <4 x float> %[[x]], i64 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <4 x float> %[[x]], i64 %[[s]]
 ! LLVMIR: store float %[[r]], ptr %{{[0-9]}}, align 4
 end subroutine vec_extract_testf32
 
@@ -58,7 +67,9 @@ subroutine vec_extract_testf64(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
 ! LLVMIR: %[[u:.*]] = urem i8 %[[i1]], 2
-! LLVMIR: %[[r:.*]] = extractelement <2 x double> %[[x]], i8 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i8 1, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <2 x double> %[[x]], i8 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <2 x double> %[[x]], i8 %[[s]]
 ! LLVMIR: store double %[[r]], ptr %{{[0-9]}}, align 8
 
   r = vec_extract(x, i2)
@@ -66,15 +77,20 @@ subroutine vec_extract_testf64(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
 ! LLVMIR: %[[u:.*]] = urem i16 %[[i2]], 2
-! LLVMIR: %[[r:.*]] = extractelement <2 x double> %[[x]], i16 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i16 1, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <2 x double> %[[x]], i16 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <2 x double> %[[x]], i16 %[[s]]
 ! LLVMIR: store double %[[r]], ptr %{{[0-9]}}, align 8
 
+
   r = vec_extract(x, i4)
 
 ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
 ! LLVMIR: %[[u:.*]] = urem i32 %[[i4]], 2
-! LLVMIR: %[[r:.*]] = extractelement <2 x double> %[[x]], i32 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i32 1, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <2 x double> %[[x]], i32 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <2 x double> %[[x]], i32 %[[s]]
 ! LLVMIR: store double %[[r]], ptr %{{[0-9]}}, align 8
 
   r = vec_extract(x, i8)
@@ -82,7 +98,9 @@ subroutine vec_extract_testf64(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
 ! LLVMIR: %[[u:.*]] = urem i64 %[[i8]], 2
-! LLVMIR: %[[r:.*]] = extractelement <2 x double> %[[x]], i64 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i64 1, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <2 x double> %[[x]], i64 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <2 x double> %[[x]], i64 %[[s]]
 ! LLVMIR: store double %[[r]], ptr %{{[0-9]}}, align 8
 end subroutine vec_extract_testf64
 
@@ -99,7 +117,9 @@ subroutine vec_extract_testi8(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
 ! LLVMIR: %[[u:.*]] = urem i8 %[[i1]], 16
-! LLVMIR: %[[r:.*]] = extractelement <16 x i8> %[[x]], i8 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i8 15, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <16 x i8> %[[x]], i8 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <16 x i8> %[[x]], i8 %[[s]]
 ! LLVMIR: store i8 %[[r]], ptr %{{[0-9]}}, align 1
 
   r = vec_extract(x, i2)
@@ -107,7 +127,9 @@ subroutine vec_extract_testi8(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
 ! LLVMIR: %[[u:.*]] = urem i16 %[[i2]], 16
-! LLVMIR: %[[r:.*]] = extractelement <16 x i8> %[[x]], i16 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i16 15, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <16 x i8> %[[x]], i16 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <16 x i8> %[[x]], i16 %[[s]]
 ! LLVMIR: store i8 %[[r]], ptr %{{[0-9]}}, align 1
 
   r = vec_extract(x, i4)
@@ -115,7 +137,9 @@ subroutine vec_extract_testi8(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
 ! LLVMIR: %[[u:.*]] = urem i32 %[[i4]], 16
-! LLVMIR: %[[r:.*]] = extractelement <16 x i8> %[[x]], i32 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i32 15, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <16 x i8> %[[x]], i32 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <16 x i8> %[[x]], i32 %[[s]]
 ! LLVMIR: store i8 %[[r]], ptr %{{[0-9]}}, align 1
 
   r = vec_extract(x, i8)
@@ -123,7 +147,9 @@ subroutine vec_extract_testi8(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
 ! LLVMIR: %[[u:.*]] = urem i64 %[[i8]], 16
-! LLVMIR: %[[r:.*]] = extractelement <16 x i8> %[[x]], i64 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i64 15, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <16 x i8> %[[x]], i64 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <16 x i8> %[[x]], i64 %[[s]]
 ! LLVMIR: store i8 %[[r]], ptr %{{[0-9]}}, align 1
 end subroutine vec_extract_testi8
 
@@ -140,7 +166,9 @@ subroutine vec_extract_testi16(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
 ! LLVMIR: %[[u:.*]] = urem i8 %[[i1]], 8
-! LLVMIR: %[[r:.*]] = extractelement <8 x i16> %[[x]], i8 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i8 7, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <8 x i16> %[[x]], i8 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <8 x i16> %[[x]], i8 %[[s]]
 ! LLVMIR: store i16 %[[r]], ptr %{{[0-9]}}, align 2
 
   r = vec_extract(x, i2)
@@ -148,7 +176,9 @@ subroutine vec_extract_testi16(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
 ! LLVMIR: %[[u:.*]] = urem i16 %[[i2]], 8
-! LLVMIR: %[[r:.*]] = extractelement <8 x i16> %[[x]], i16 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i16 7, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <8 x i16> %[[x]], i16 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <8 x i16> %[[x]], i16 %[[s]]
 ! LLVMIR: store i16 %[[r]], ptr %{{[0-9]}}, align 2
 
   r = vec_extract(x, i4)
@@ -156,7 +186,9 @@ subroutine vec_extract_testi16(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
 ! LLVMIR: %[[u:.*]] = urem i32 %[[i4]], 8
-! LLVMIR: %[[r:.*]] = extractelement <8 x i16> %[[x]], i32 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i32 7, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <8 x i16> %[[x]], i32 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <8 x i16> %[[x]], i32 %[[s]]
 ! LLVMIR: store i16 %[[r]], ptr %{{[0-9]}}, align 2
 
   r = vec_extract(x, i8)
@@ -164,7 +196,9 @@ subroutine vec_extract_testi16(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
 ! LLVMIR: %[[u:.*]] = urem i64 %[[i8]], 8
-! LLVMIR: %[[r:.*]] = extractelement <8 x i16> %[[x]], i64 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i64 7, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <8 x i16> %[[x]], i64 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <8 x i16> %[[x]], i64 %[[s]]
 ! LLVMIR: store i16 %[[r]], ptr %{{[0-9]}}, align 2
 end subroutine vec_extract_testi16
 
@@ -181,7 +215,9 @@ subroutine vec_extract_testi32(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
 ! LLVMIR: %[[u:.*]] = urem i8 %[[i1]], 4
-! LLVMIR: %[[r:.*]] = extractelement <4 x i32> %[[x]], i8 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i8 3, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <4 x i32> %[[x]], i8 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <4 x i32> %[[x]], i8 %[[s]]
 ! LLVMIR: store i32 %[[r]], ptr %{{[0-9]}}, align 4
 
   r = vec_extract(x, i2)
@@ -189,7 +225,9 @@ subroutine vec_extract_testi32(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
 ! LLVMIR: %[[u:.*]] = urem i16 %[[i2]], 4
-! LLVMIR: %[[r:.*]] = extractelement <4 x i32> %[[x]], i16 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i16 3, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <4 x i32> %[[x]], i16 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <4 x i32> %[[x]], i16 %[[s]]
 ! LLVMIR: store i32 %[[r]], ptr %{{[0-9]}}, align 4
 
   r = vec_extract(x, i4)
@@ -197,7 +235,9 @@ subroutine vec_extract_testi32(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
 ! LLVMIR: %[[u:.*]] = urem i32 %[[i4]], 4
-! LLVMIR: %[[r:.*]] = extractelement <4 x i32> %[[x]], i32 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i32 3, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <4 x i32> %[[x]], i32 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <4 x i32> %[[x]], i32 %[[s]]
 ! LLVMIR: store i32 %[[r]], ptr %{{[0-9]}}, align 4
 
   r = vec_extract(x, i8)
@@ -205,7 +245,9 @@ subroutine vec_extract_testi32(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
 ! LLVMIR: %[[u:.*]] = urem i64 %[[i8]], 4
-! LLVMIR: %[[r:.*]] = extractelement <4 x i32> %[[x]], i64 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i64 3, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <4 x i32> %[[x]], i64 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <4 x i32> %[[x]], i64 %[[s]]
 ! LLVMIR: store i32 %[[r]], ptr %{{[0-9]}}, align 4
 end subroutine vec_extract_testi32
 
@@ -222,7 +264,9 @@ subroutine vec_extract_testi64(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
 ! LLVMIR: %[[u:.*]] = urem i8 %[[i1]], 2
-! LLVMIR: %[[r:.*]] = extractelement <2 x i64> %[[x]], i8 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i8 1, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <2 x i64> %[[x]], i8 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <2 x i64> %[[x]], i8 %[[s]]
 ! LLVMIR: store i64 %[[r]], ptr %{{[0-9]}}, align 8
 
   r = vec_extract(x, i2)
@@ -230,7 +274,9 @@ subroutine vec_extract_testi64(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
 ! LLVMIR: %[[u:.*]] = urem i16 %[[i2]], 2
-! LLVMIR: %[[r:.*]] = extractelement <2 x i64> %[[x]], i16 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i16 1, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <2 x i64> %[[x]], i16 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <2 x i64> %[[x]], i16 %[[s]]
 ! LLVMIR: store i64 %[[r]], ptr %{{[0-9]}}, align 8
 
   r = vec_extract(x, i4)
@@ -238,7 +284,9 @@ subroutine vec_extract_testi64(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
 ! LLVMIR: %[[u:.*]] = urem i32 %[[i4]], 2
-! LLVMIR: %[[r:.*]] = extractelement <2 x i64> %[[x]], i32 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i32 1, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <2 x i64> %[[x]], i32 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <2 x i64> %[[x]], i32 %[[s]]
 ! LLVMIR: store i64 %[[r]], ptr %{{[0-9]}}, align 8
 
   r = vec_extract(x, i8)
@@ -246,6 +294,8 @@ subroutine vec_extract_testi64(x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
 ! LLVMIR: %[[u:.*]] = urem i64 %[[i8]], 2
-! LLVMIR: %[[r:.*]] = extractelement <2 x i64> %[[x]], i64 %[[u]]
+! LLVMIR-BE: %[[s:.*]] = sub i64 1, %[[u]]
+! LLVMIR-LE: %[[r:.*]] = extractelement <2 x i64> %[[x]], i64 %[[u]]
+! LLVMIR-BE: %[[r:.*]] = extractelement <2 x i64> %[[x]], i64 %[[s]]
 ! LLVMIR: store i64 %[[r]], ptr %{{[0-9]}}, align 8
 end subroutine vec_extract_testi64
diff --git a/flang/test/Lower/PowerPC/ppc-vec-insert.f90 b/flang/test/Lower/PowerPC/ppc-vec-insert.f90
index 3648be6ac027e3..dd57fcc67be080 100644
--- a/flang/test/Lower/PowerPC/ppc-vec-insert.f90
+++ b/flang/test/Lower/PowerPC/ppc-vec-insert.f90
@@ -1,4 +1,5 @@
-! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-LE" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-BE" %s
 ! REQUIRES: target=powerpc{{.*}}
 
 ! vec_insert
@@ -18,7 +19,9 @@ subroutine vec_insert_testf32(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
 ! LLVMIR: %[[urem:.*]] = urem i8 %[[i1]], 4
-! LLVMIR: %[[r:.*]] = insertelement <4 x float> %[[x]], float %[[v]], i8 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i8 3, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <4 x float> %[[x]], float %[[v]], i8 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <4 x float> %[[x]], float %[[v]], i8 %[[s]]
 ! LLVMIR: store <4 x float> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i2)
@@ -27,7 +30,9 @@ subroutine vec_insert_testf32(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
 ! LLVMIR: %[[urem:.*]] = urem i16 %[[i2]], 4
-! LLVMIR: %[[r:.*]] = insertelement <4 x float> %[[x]], float %[[v]], i16 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i16 3, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <4 x float> %[[x]], float %[[v]], i16 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <4 x float> %[[x]], float %[[v]], i16 %[[s]]
 ! LLVMIR: store <4 x float> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i4)
@@ -36,7 +41,9 @@ subroutine vec_insert_testf32(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
 ! LLVMIR: %[[urem:.*]] = urem i32 %[[i4]], 4
-! LLVMIR: %[[r:.*]] = insertelement <4 x float> %[[x]], float %[[v]], i32 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i32 3, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <4 x float> %[[x]], float %[[v]], i32 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <4 x float> %[[x]], float %[[v]], i32 %[[s]]
 ! LLVMIR: store <4 x float> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i8)
@@ -45,7 +52,9 @@ subroutine vec_insert_testf32(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
 ! LLVMIR: %[[urem:.*]] = urem i64 %[[i8]], 4
-! LLVMIR: %[[r:.*]] = insertelement <4 x float> %[[x]], float %[[v]], i64 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i64 3, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <4 x float> %[[x]], float %[[v]], i64 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <4 x float> %[[x]], float %[[v]], i64 %[[s]]
 ! LLVMIR: store <4 x float> %[[r]], ptr %{{[0-9]}}, align 16
 end subroutine vec_insert_testf32
 
@@ -64,7 +73,9 @@ subroutine vec_insert_testf64(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
 ! LLVMIR: %[[urem:.*]] = urem i8 %[[i1]], 2
-! LLVMIR: %[[r:.*]] = insertelement <2 x double> %[[x]], double %[[v]], i8 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i8 1, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <2 x double> %[[x]], double %[[v]], i8 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <2 x double> %[[x]], double %[[v]], i8 %[[s]]
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i2)
@@ -73,7 +84,9 @@ subroutine vec_insert_testf64(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
 ! LLVMIR: %[[urem:.*]] = urem i16 %[[i2]], 2
-! LLVMIR: %[[r:.*]] = insertelement <2 x double> %[[x]], double %[[v]], i16 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i16 1, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <2 x double> %[[x]], double %[[v]], i16 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <2 x double> %[[x]], double %[[v]], i16 %[[s]]
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i4)
@@ -82,7 +95,9 @@ subroutine vec_insert_testf64(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
 ! LLVMIR: %[[urem:.*]] = urem i32 %[[i4]], 2
-! LLVMIR: %[[r:.*]] = insertelement <2 x double> %[[x]], double %[[v]], i32 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i32 1, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <2 x double> %[[x]], double %[[v]], i32 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <2 x double> %[[x]], double %[[v]], i32 %[[s]]
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i8)
@@ -91,7 +106,9 @@ subroutine vec_insert_testf64(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
 ! LLVMIR: %[[urem:.*]] = urem i64 %[[i8]], 2
-! LLVMIR: %[[r:.*]] = insertelement <2 x double> %[[x]], double %[[v]], i64 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i64 1, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <2 x double> %[[x]], double %[[v]], i64 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <2 x double> %[[x]], double %[[v]], i64 %[[s]]
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{[0-9]}}, align 16
 end subroutine vec_insert_testf64
 
@@ -110,7 +127,9 @@ subroutine vec_insert_testi8(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
 ! LLVMIR: %[[urem:.*]] = urem i8 %[[i1]], 16
-! LLVMIR: %[[r:.*]] = insertelement <16 x i8> %[[x]], i8 %[[v]], i8 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i8 15, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <16 x i8> %[[x]], i8 %[[v]], i8 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <16 x i8> %[[x]], i8 %[[v]], i8 %[[s]]
 ! LLVMIR: store <16 x i8> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i2)
@@ -119,7 +138,9 @@ subroutine vec_insert_testi8(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
 ! LLVMIR: %[[urem:.*]] = urem i16 %[[i2]], 16
-! LLVMIR: %[[r:.*]] = insertelement <16 x i8> %[[x]], i8 %[[v]], i16 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i16 15, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <16 x i8> %[[x]], i8 %[[v]], i16 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <16 x i8> %[[x]], i8 %[[v]], i16 %[[s]]
 ! LLVMIR: store <16 x i8> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i4)
@@ -128,7 +149,9 @@ subroutine vec_insert_testi8(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
 ! LLVMIR: %[[urem:.*]] = urem i32 %[[i4]], 16
-! LLVMIR: %[[r:.*]] = insertelement <16 x i8> %[[x]], i8 %[[v]], i32 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i32 15, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <16 x i8> %[[x]], i8 %[[v]], i32 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <16 x i8> %[[x]], i8 %[[v]], i32 %[[s]]
 ! LLVMIR: store <16 x i8> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i8)
@@ -137,7 +160,9 @@ subroutine vec_insert_testi8(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
 ! LLVMIR: %[[urem:.*]] = urem i64 %[[i8]], 16
-! LLVMIR: %[[r:.*]] = insertelement <16 x i8> %[[x]], i8 %[[v]], i64 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i64 15, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <16 x i8> %[[x]], i8 %[[v]], i64 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <16 x i8> %[[x]], i8 %[[v]], i64 %[[s]]
 ! LLVMIR: store <16 x i8> %[[r]], ptr %{{[0-9]}}, align 16
 end subroutine vec_insert_testi8
 
@@ -156,7 +181,9 @@ subroutine vec_insert_testi16(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
 ! LLVMIR: %[[urem:.*]] = urem i8 %[[i1]], 8
-! LLVMIR: %[[r:.*]] = insertelement <8 x i16> %[[x]], i16 %[[v]], i8 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i8 7, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <8 x i16> %[[x]], i16 %[[v]], i8 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <8 x i16> %[[x]], i16 %[[v]], i8 %[[s]]
 ! LLVMIR: store <8 x i16> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i2)
@@ -165,7 +192,9 @@ subroutine vec_insert_testi16(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
 ! LLVMIR: %[[urem:.*]] = urem i16 %[[i2]], 8
-! LLVMIR: %[[r:.*]] = insertelement <8 x i16> %[[x]], i16 %[[v]], i16 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i16 7, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <8 x i16> %[[x]], i16 %[[v]], i16 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <8 x i16> %[[x]], i16 %[[v]], i16 %[[s]]
 ! LLVMIR: store <8 x i16> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i4)
@@ -174,7 +203,9 @@ subroutine vec_insert_testi16(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
 ! LLVMIR: %[[urem:.*]] = urem i32 %[[i4]], 8
-! LLVMIR: %[[r:.*]] = insertelement <8 x i16> %[[x]], i16 %[[v]], i32 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i32 7, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <8 x i16> %[[x]], i16 %[[v]], i32 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <8 x i16> %[[x]], i16 %[[v]], i32 %[[s]]
 ! LLVMIR: store <8 x i16> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i8)
@@ -183,7 +214,9 @@ subroutine vec_insert_testi16(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
 ! LLVMIR: %[[urem:.*]] = urem i64 %[[i8]], 8
-! LLVMIR: %[[r:.*]] = insertelement <8 x i16> %[[x]], i16 %[[v]], i64 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i64 7, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <8 x i16> %[[x]], i16 %[[v]], i64 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <8 x i16> %[[x]], i16 %[[v]], i64 %[[s]]
 ! LLVMIR: store <8 x i16> %[[r]], ptr %{{[0-9]}}, align 16
 end subroutine vec_insert_testi16
 
@@ -202,7 +235,9 @@ subroutine vec_insert_testi32(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
 ! LLVMIR: %[[urem:.*]] = urem i8 %[[i1]], 4
-! LLVMIR: %[[r:.*]] = insertelement <4 x i32> %[[x]], i32 %[[v]], i8 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i8 3, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <4 x i32> %[[x]], i32 %[[v]], i8 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <4 x i32> %[[x]], i32 %[[v]], i8 %[[s]]
 ! LLVMIR: store <4 x i32> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i2)
@@ -211,7 +246,9 @@ subroutine vec_insert_testi32(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
 ! LLVMIR: %[[urem:.*]] = urem i16 %[[i2]], 4
-! LLVMIR: %[[r:.*]] = insertelement <4 x i32> %[[x]], i32 %[[v]], i16 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i16 3, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <4 x i32> %[[x]], i32 %[[v]], i16 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <4 x i32> %[[x]], i32 %[[v]], i16 %[[s]]
 ! LLVMIR: store <4 x i32> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i4)
@@ -220,7 +257,9 @@ subroutine vec_insert_testi32(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
 ! LLVMIR: %[[urem:.*]] = urem i32 %[[i4]], 4
-! LLVMIR: %[[r:.*]] = insertelement <4 x i32> %[[x]], i32 %[[v]], i32 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i32 3, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <4 x i32> %[[x]], i32 %[[v]], i32 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <4 x i32> %[[x]], i32 %[[v]], i32 %[[s]]
 ! LLVMIR: store <4 x i32> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i8)
@@ -229,7 +268,9 @@ subroutine vec_insert_testi32(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
 ! LLVMIR: %[[urem:.*]] = urem i64 %[[i8]], 4
-! LLVMIR: %[[r:.*]] = insertelement <4 x i32> %[[x]], i32 %[[v]], i64 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i64 3, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <4 x i32> %[[x]], i32 %[[v]], i64 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <4 x i32> %[[x]], i32 %[[v]], i64 %[[s]]
 ! LLVMIR: store <4 x i32> %[[r]], ptr %{{[0-9]}}, align 16
 end subroutine vec_insert_testi32
 
@@ -248,7 +289,9 @@ subroutine vec_insert_testi64(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
 ! LLVMIR: %[[urem:.*]] = urem i8 %[[i1]], 2
-! LLVMIR: %[[r:.*]] = insertelement <2 x i64> %[[x]], i64 %[[v]], i8 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i8 1, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <2 x i64> %[[x]], i64 %[[v]], i8 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <2 x i64> %[[x]], i64 %[[v]], i8 %[[s]]
 ! LLVMIR: store <2 x i64> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i2)
@@ -257,7 +300,9 @@ subroutine vec_insert_testi64(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
 ! LLVMIR: %[[urem:.*]] = urem i16 %[[i2]], 2
-! LLVMIR: %[[r:.*]] = insertelement <2 x i64> %[[x]], i64 %[[v]], i16 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i16 1, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <2 x i64> %[[x]], i64 %[[v]], i16 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <2 x i64> %[[x]], i64 %[[v]], i16 %[[s]]
 ! LLVMIR: store <2 x i64> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i4)
@@ -266,7 +311,9 @@ subroutine vec_insert_testi64(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
 ! LLVMIR: %[[urem:.*]] = urem i32 %[[i4]], 2
-! LLVMIR: %[[r:.*]] = insertelement <2 x i64> %[[x]], i64 %[[v]], i32 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i32 1, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <2 x i64> %[[x]], i64 %[[v]], i32 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <2 x i64> %[[x]], i64 %[[v]], i32 %[[s]]
 ! LLVMIR: store <2 x i64> %[[r]], ptr %{{[0-9]}}, align 16
 
   r = vec_insert(v, x, i8)
@@ -275,6 +322,8 @@ subroutine vec_insert_testi64(v, x, i1, i2, i4, i8)
 ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
 ! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
 ! LLVMIR: %[[urem:.*]] = urem i64 %[[i8]], 2
-! LLVMIR: %[[r:.*]] = insertelement <2 x i64> %[[x]], i64 %[[v]], i64 %[[urem]]
+! LLVMIR-BE: %[[s:.*]] = sub i64 1, %[[urem]]
+! LLVMIR-LE: %[[r:.*]] = insertelement <2 x i64> %[[x]], i64 %[[v]], i64 %[[urem]]
+! LLVMIR-BE: %[[r:.*]] = insertelement <2 x i64> %[[x]], i64 %[[v]], i64 %[[s]]
 ! LLVMIR: store <2 x i64> %[[r]], ptr %{{[0-9]}}, align 16
 end subroutine vec_insert_testi64
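
In the load tests that follow (vec_lvsl, vec_lvsr, and the vec_xl_be
family), the little-endian lowering reverses the loaded elements with a
shufflevector before the store, while big-endian stores the intrinsic's
result directly, so only the LLVMIR-LE run checks the shuffle. A hedged
sketch for the <16 x i8> case (illustrative IR matching the checks
below):

  %ld = call <16 x i8> @llvm.ppc.altivec.lvsl(ptr %addr)
  ; LE only: reverse all 16 bytes
  %sv = shufflevector <16 x i8> %ld, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
  store <16 x i8> %sv, ptr %res, align 16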
diff --git a/flang/test/Lower/PowerPC/ppc-vec-load.f90 b/flang/test/Lower/PowerPC/ppc-vec-load.f90
index 1da6381905142c..4d51512df0f7b4 100644
--- a/flang/test/Lower/PowerPC/ppc-vec-load.f90
+++ b/flang/test/Lower/PowerPC/ppc-vec-load.f90
@@ -1,4 +1,5 @@
-! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-LE" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-BE" %s
 ! REQUIRES: target=powerpc{{.*}}
 
 !----------------------
@@ -294,8 +295,9 @@ subroutine vec_lvsl_testi8s(arg1, arg2, res)
 ! LLVMIR: %[[rshft:.*]] = ashr i64 %[[lshft]], 56
 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[rshft]]
 ! LLVMIR: %[[ld:.*]] = call <16 x i8> @llvm.ppc.altivec.lvsl(ptr %[[addr]])
-! LLVMIR: %[[sv:.*]] = shufflevector <16 x i8> %[[ld]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-! LLVMIR: store <16 x i8> %[[sv]], ptr %2, align 16
+! LLVMIR-LE: %[[sv:.*]] = shufflevector <16 x i8> %[[ld]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+! LLVMIR-LE: store <16 x i8> %[[sv]], ptr %2, align 16
+! LLVMIR-BE: store <16 x i8> %[[ld]], ptr %2, align 16
 end subroutine vec_lvsl_testi8s
 
 ! CHECK-LABEL: @vec_lvsl_testi16a
@@ -311,8 +313,9 @@ subroutine vec_lvsl_testi16a(arg1, arg2, res)
 ! LLVMIR: %[[rshft:.*]] = ashr i64 %[[lshft]], 56
 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[rshft]]
 ! LLVMIR: %[[ld:.*]] = call <16 x i8> @llvm.ppc.altivec.lvsl(ptr %[[addr]])
-! LLVMIR: %[[sv:.*]] = shufflevector <16 x i8> %[[ld]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-! LLVMIR:  store <16 x i8> %[[sv]], ptr %2, align 16
+! LLVMIR-LE: %[[sv:.*]] = shufflevector <16 x i8> %[[ld]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+! LLVMIR-LE:  store <16 x i8> %[[sv]], ptr %2, align 16
+! LLVMIR-BE:  store <16 x i8> %[[ld]], ptr %2, align 16
 end subroutine vec_lvsl_testi16a
 
 ! CHECK-LABEL: @vec_lvsl_testi32a
@@ -328,8 +331,9 @@ subroutine vec_lvsl_testi32a(arg1, arg2, res)
 ! LLVMIR: %[[rshft:.*]] = ashr i64 %[[lshft]], 56
 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[rshft]]
 ! LLVMIR: %[[ld:.*]] = call <16 x i8> @llvm.ppc.altivec.lvsl(ptr %[[addr]])
-! LLVMIR: %[[sv:.*]] = shufflevector <16 x i8> %[[ld]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-! LLVMIR:  store <16 x i8> %[[sv]], ptr %2, align 16
+! LLVMIR-LE: %[[sv:.*]] = shufflevector <16 x i8> %[[ld]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+! LLVMIR-LE:  store <16 x i8> %[[sv]], ptr %2, align 16
+! LLVMIR-BE:  store <16 x i8> %[[ld]], ptr %2, align 16
 end subroutine vec_lvsl_testi32a
 
 ! CHECK-LABEL: @vec_lvsl_testf32a
@@ -344,8 +348,9 @@ subroutine vec_lvsl_testf32a(arg1, arg2, res)
 ! LLVMIR: %[[rshft:.*]] = ashr i64 %[[lshft]], 56
 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[rshft]]
 ! LLVMIR: %[[ld:.*]] = call <16 x i8> @llvm.ppc.altivec.lvsl(ptr %[[addr]])
-! LLVMIR: %[[sv:.*]] = shufflevector <16 x i8> %[[ld]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-! LLVMIR:  store <16 x i8> %[[sv]], ptr %2, align 16
+! LLVMIR-LE: %[[sv:.*]] = shufflevector <16 x i8> %[[ld]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+! LLVMIR-LE:  store <16 x i8> %[[sv]], ptr %2, align 16
+! LLVMIR-BE:  store <16 x i8> %[[ld]], ptr %2, align 16
 end subroutine vec_lvsl_testf32a
 
 !----------------------
@@ -365,8 +370,9 @@ subroutine vec_lvsr_testi8s(arg1, arg2, res)
 ! LLVMIR: %[[rshft:.*]] = ashr i64 %[[lshft]], 56
 ! LLVMIR: %[[ld:.*]] = getelementptr i8, ptr %1, i64 %[[rshft]]
 ! LLVMIR: %[[addr:.*]] = call <16 x i8> @llvm.ppc.altivec.lvsr(ptr %[[ld]])
-! LLVMIR: %[[sv:.*]] = shufflevector <16 x i8> %[[addr]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-! LLVMIR: store <16 x i8> %[[sv]], ptr %2, align 16
+! LLVMIR-LE: %[[sv:.*]] = shufflevector <16 x i8> %[[addr]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+! LLVMIR-LE: store <16 x i8> %[[sv]], ptr %2, align 16
+! LLVMIR-BE: store <16 x i8> %[[addr]], ptr %2, align 16
 end subroutine vec_lvsr_testi8s
 
 ! CHECK-LABEL: @vec_lvsr_testi16a
@@ -382,8 +388,9 @@ subroutine vec_lvsr_testi16a(arg1, arg2, res)
 ! LLVMIR: %[[rshft:.*]] = ashr i64 %[[lshft]], 56
 ! LLVMIR: %[[ld:.*]] = getelementptr i8, ptr %1, i64 %[[rshft]]
 ! LLVMIR: %[[addr:.*]] = call <16 x i8> @llvm.ppc.altivec.lvsr(ptr %[[ld]])
-! LLVMIR: %[[sv:.*]] = shufflevector <16 x i8> %[[addr]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-! LLVMIR: store <16 x i8> %[[sv]], ptr %2, align 16
+! LLVMIR-LE: %[[sv:.*]] = shufflevector <16 x i8> %[[addr]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+! LLVMIR-LE: store <16 x i8> %[[sv]], ptr %2, align 16
+! LLVMIR-BE: store <16 x i8> %[[addr]], ptr %2, align 16
 end subroutine vec_lvsr_testi16a
 
 ! CHECK-LABEL: @vec_lvsr_testi32a
@@ -399,8 +406,9 @@ subroutine vec_lvsr_testi32a(arg1, arg2, res)
 ! LLVMIR: %[[rshft:.*]] = ashr i64 %[[lshft]], 56
 ! LLVMIR: %[[ld:.*]] = getelementptr i8, ptr %1, i64 %[[rshft]]
 ! LLVMIR: %[[addr:.*]] = call <16 x i8> @llvm.ppc.altivec.lvsr(ptr %[[ld]])
-! LLVMIR: %[[sv:.*]] = shufflevector <16 x i8> %[[addr]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-! LLVMIR: store <16 x i8> %[[sv]], ptr %2, align 16
+! LLVMIR-LE: %[[sv:.*]] = shufflevector <16 x i8> %[[addr]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+! LLVMIR-LE: store <16 x i8> %[[sv]], ptr %2, align 16
+! LLVMIR-BE: store <16 x i8> %[[addr]], ptr %2, align 16
 end subroutine vec_lvsr_testi32a
 
 ! CHECK-LABEL: @vec_lvsr_testf32a
@@ -415,8 +423,9 @@ subroutine vec_lvsr_testf32a(arg1, arg2, res)
 ! LLVMIR: %[[rshft:.*]] = ashr i64 %[[lshft]], 56
 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[rshft]]
 ! LLVMIR: %[[ld:.*]] = call <16 x i8> @llvm.ppc.altivec.lvsr(ptr %[[addr]])
-! LLVMIR: %[[sv:.*]] = shufflevector <16 x i8> %[[ld]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-! LLVMIR: store <16 x i8> %[[sv]], ptr %2, align 16
+! LLVMIR-LE: %[[sv:.*]] = shufflevector <16 x i8> %[[ld]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+! LLVMIR-LE: store <16 x i8> %[[sv]], ptr %2, align 16
+! LLVMIR-BE: store <16 x i8> %[[ld]], ptr %2, align 16
 end subroutine vec_lvsr_testf32a
 
 !----------------------
@@ -708,8 +717,9 @@ subroutine vec_xl_be_testi8a(arg1, arg2, res)
 ! LLVMIR: %[[arg1:.*]] = load i8, ptr %0, align 1
 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i8 %[[arg1]]
 ! LLVMIR: %[[ld:.*]] = load <16 x i8>, ptr %[[addr]], align 1
-! LLVMIR: %[[shff:.*]] = shufflevector <16 x i8> %[[ld]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-! LLVMIR: store <16 x i8> %[[shff]], ptr %2, align 16
+! LLVMIR-LE: %[[shff:.*]] = shufflevector <16 x i8> %[[ld]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+! LLVMIR-LE: store <16 x i8> %[[shff]], ptr %2, align 16
+! LLVMIR-BE: store <16 x i8> %[[ld]], ptr %2, align 16
 end subroutine vec_xl_be_testi8a
 
 ! CHECK-LABEL: @vec_xl_be_testi16a
@@ -722,8 +732,9 @@ subroutine vec_xl_be_testi16a(arg1, arg2, res)
 ! LLVMIR: %[[arg1:.*]] = load i16, ptr %0, align 2
 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i16 %[[arg1]]
 ! LLVMIR: %[[ld:.*]] = load <8 x i16>, ptr %[[addr]], align 1
-! LLVMIR: %[[shff:.*]] = shufflevector <8 x i16> %[[ld]], <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-! LLVMIR: store <8 x i16> %[[shff]], ptr %2, align 16
+! LLVMIR-LE: %[[shff:.*]] = shufflevector <8 x i16> %[[ld]], <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+! LLVMIR-LE: store <8 x i16> %[[shff]], ptr %2, align 16
+! LLVMIR-BE: store <8 x i16> %[[ld]], ptr %2, align 16
 end subroutine vec_xl_be_testi16a
 
 ! CHECK-LABEL: @vec_xl_be_testi32a
@@ -736,8 +747,9 @@ subroutine vec_xl_be_testi32a(arg1, arg2, res)
 ! LLVMIR: %[[arg1:.*]] = load i32, ptr %0, align 4
 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i32 %[[arg1]]
 ! LLVMIR:  %[[ld:.*]] = load <4 x i32>, ptr %[[addr]], align 1
-! LLVMIR:  %[[shff:.*]] = shufflevector <4 x i32> %[[ld]], <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-! LLVMIR:  store <4 x i32> %[[shff]], ptr %2, align 16
+! LLVMIR-LE:  %[[shff:.*]] = shufflevector <4 x i32> %[[ld]], <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+! LLVMIR-LE:  store <4 x i32> %[[shff]], ptr %2, align 16
+! LLVMIR-BE:  store <4 x i32> %[[ld]], ptr %2, align 16
 end subroutine vec_xl_be_testi32a
 
 ! CHECK-LABEL: @vec_xl_be_testi64a
@@ -750,8 +762,9 @@ subroutine vec_xl_be_testi64a(arg1, arg2, res)
 ! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8
 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]]
 ! LLVMIR:  %[[ld:.*]] = load <2 x i64>, ptr %[[addr]], align 1
-! LLVMIR:  %[[shff:.*]] = shufflevector <2 x i64> %[[ld]], <2 x i64> undef, <2 x i32> <i32 1, i32 0>
-! LLVMIR:  store <2 x i64> %[[shff]], ptr %2, align 16
+! LLVMIR-LE:  %[[shff:.*]] = shufflevector <2 x i64> %[[ld]], <2 x i64> undef, <2 x i32> <i32 1, i32 0>
+! LLVMIR-LE:  store <2 x i64> %[[shff]], ptr %2, align 16
+! LLVMIR-BE:  store <2 x i64> %[[ld]], ptr %2, align 16
 end subroutine vec_xl_be_testi64a
 
 ! CHECK-LABEL: @vec_xl_be_testf32a
@@ -764,8 +777,9 @@ subroutine vec_xl_be_testf32a(arg1, arg2, res)
 ! LLVMIR: %[[arg1:.*]] = load i16, ptr %0, align 2
 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i16 %[[arg1]]
 ! LLVMIR:  %[[ld:.*]] = load <4 x float>, ptr %[[addr]], align 1
-! LLVMIR:  %[[shff:.*]] = shufflevector <4 x float> %[[ld]], <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-! LLVMIR:  store <4 x float> %[[shff]], ptr %2, align 16
+! LLVMIR-LE:  %[[shff:.*]] = shufflevector <4 x float> %[[ld]], <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+! LLVMIR-LE:  store <4 x float> %[[shff]], ptr %2, align 16
+! LLVMIR-BE:  store <4 x float> %[[ld]], ptr %2, align 16
 end subroutine vec_xl_be_testf32a
 
 ! CHECK-LABEL: @vec_xl_be_testf64a
@@ -778,8 +792,9 @@ subroutine vec_xl_be_testf64a(arg1, arg2, res)
 ! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8
 ! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]]
 ! LLVMIR:  %[[ld:.*]] = load <2 x double>, ptr %[[addr]], align 1
-! LLVMIR:  %[[shff:.*]] = shufflevector <2 x double> %[[ld]], <2 x double> undef, <2 x i32> <i32 1, i32 0>
-! LLVMIR:  store <2 x double> %[[shff]], ptr %2, align 16
+! LLVMIR-LE:  %[[shff:.*]] = shufflevector <2 x double> %[[ld]], <2 x double> undef, <2 x i32> <i32 1, i32 0>
+! LLVMIR-LE:  store <2 x double> %[[shff]], ptr %2, align 16
+! LLVMIR-BE:  store <2 x double> %[[ld]], ptr %2, align 16
 end subroutine vec_xl_be_testf64a
 
 !----------------------
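
For vec_perm, the little-endian lowering swaps the two data operands and
complements the permute control vector (xor with all-ones) so that the
big-endian-native vperm instruction yields the expected LE result; on
big-endian both the operands and the control pass through unchanged. A
hedged sketch (illustrative IR matching the checks below):

  ; little-endian: complement the control, swap the sources
  %xor = xor <16 x i8> %mask, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
  %rle = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %b, <4 x i32> %a, <16 x i8> %xor)
  ; big-endian: direct call
  %rbe = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %a, <4 x i32> %b, <16 x i8> %mask)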
diff --git a/flang/test/Lower/PowerPC/ppc-vec-perm.f90 b/flang/test/Lower/PowerPC/ppc-vec-perm.f90
index 99a6295a014943..5353e9c10db7dd 100644
--- a/flang/test/Lower/PowerPC/ppc-vec-perm.f90
+++ b/flang/test/Lower/PowerPC/ppc-vec-perm.f90
@@ -1,4 +1,5 @@
-! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-LE" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-BE" %s
 ! REQUIRES: target=powerpc{{.*}}
 
 ! CHECK-LABEL: vec_perm_test_i1
@@ -12,8 +13,9 @@ subroutine vec_perm_test_i1(arg1, arg2, arg3)
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[barg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
 ! LLVMIR: %[[barg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
-! LLVMIR: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-! LLVMIR: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
+! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <16 x i8>
 ! LLVMIR: store <16 x i8> %[[bcall]], ptr %{{.*}}, align 16
 end subroutine vec_perm_test_i1
@@ -29,8 +31,9 @@ subroutine vec_perm_test_i2(arg1, arg2, arg3)
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
 ! LLVMIR: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
-! LLVMIR: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-! LLVMIR: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
+! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <8 x i16>
 ! LLVMIR: store <8 x i16> %[[bcall]], ptr %{{.*}}, align 16
 end subroutine vec_perm_test_i2
@@ -44,8 +47,9 @@ subroutine vec_perm_test_i4(arg1, arg2, arg3)
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
-! LLVMIR: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-! LLVMIR: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[arg2]], <4 x i32> %[[arg1]], <16 x i8> %[[xor]])
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[arg2]], <4 x i32> %[[arg1]], <16 x i8> %[[xor]])
+! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: store <4 x i32> %[[call]], ptr %{{.*}}, align 16
 end subroutine vec_perm_test_i4
 
@@ -60,8 +64,9 @@ subroutine vec_perm_test_i8(arg1, arg2, arg3)
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[barg1:.*]] = bitcast <2 x i64> %[[arg1]] to <4 x i32>
 ! LLVMIR: %[[barg2:.*]] = bitcast <2 x i64> %[[arg2]] to <4 x i32>
-! LLVMIR: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-! LLVMIR: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
+! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <2 x i64>
 ! LLVMIR: store <2 x i64> %[[bcall]], ptr %{{.*}}, align 16
 end subroutine vec_perm_test_i8
@@ -77,8 +82,9 @@ subroutine vec_perm_test_u1(arg1, arg2, arg3)
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[barg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
 ! LLVMIR: %[[barg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
-! LLVMIR: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-! LLVMIR: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
+! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <16 x i8>
 ! LLVMIR: store <16 x i8> %[[bcall]], ptr %{{.*}}, align 16
 end subroutine vec_perm_test_u1
@@ -94,8 +100,9 @@ subroutine vec_perm_test_u2(arg1, arg2, arg3)
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
 ! LLVMIR: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
-! LLVMIR: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-! LLVMIR: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
+! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <8 x i16>
 ! LLVMIR: store <8 x i16> %[[bcall]], ptr %{{.*}}, align 16
 end subroutine vec_perm_test_u2
@@ -109,8 +116,9 @@ subroutine vec_perm_test_u4(arg1, arg2, arg3)
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
-! LLVMIR: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-! LLVMIR: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[arg2]], <4 x i32> %[[arg1]], <16 x i8> %[[xor]])
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[arg2]], <4 x i32> %[[arg1]], <16 x i8> %[[xor]])
+! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: store <4 x i32> %[[call]], ptr %{{.*}}, align 16
 end subroutine vec_perm_test_u4
 
@@ -125,8 +133,9 @@ subroutine vec_perm_test_u8(arg1, arg2, arg3)
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[barg1:.*]] = bitcast <2 x i64> %[[arg1]] to <4 x i32>
 ! LLVMIR: %[[barg2:.*]] = bitcast <2 x i64> %[[arg2]] to <4 x i32>
-! LLVMIR: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-! LLVMIR: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
+! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <2 x i64>
 ! LLVMIR: store <2 x i64> %[[bcall]], ptr %{{.*}}, align 16
 end subroutine vec_perm_test_u8
@@ -142,8 +151,9 @@ subroutine vec_perm_test_r4(arg1, arg2, arg3)
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[barg1:.*]] = bitcast <4 x float> %[[arg1]] to <4 x i32>
 ! LLVMIR: %[[barg2:.*]] = bitcast <4 x float> %[[arg2]] to <4 x i32>
-! LLVMIR: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-! LLVMIR: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
+! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <4 x float>
 ! LLVMIR: store <4 x float> %[[bcall]], ptr %{{.*}}, align 16
 end subroutine vec_perm_test_r4
@@ -159,8 +169,9 @@ subroutine vec_perm_test_r8(arg1, arg2, arg3)
 ! LLVMIR: %[[arg3:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[barg1:.*]] = bitcast <2 x double> %[[arg1]] to <4 x i32>
 ! LLVMIR: %[[barg2:.*]] = bitcast <2 x double> %[[arg2]] to <4 x i32>
-! LLVMIR: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-! LLVMIR: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
+! LLVMIR-LE: %[[xor:.*]] = xor <16 x i8> %[[arg3]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR-LE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg2]], <4 x i32> %[[barg1]], <16 x i8> %[[xor]])
+! LLVMIR-BE: %[[call:.*]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %[[barg1]], <4 x i32> %[[barg2]], <16 x i8> %[[arg3]])
 ! LLVMIR: %[[bcall:.*]] = bitcast <4 x i32> %[[call]] to <2 x double>
 ! LLVMIR: store <2 x double> %[[bcall]], ptr %{{.*}}, align 16
 end subroutine vec_perm_test_r8
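
Note on the vec_perm hunks above: vperm numbers the bytes of its two
concatenated inputs from the big end, so on little-endian targets the
lowering complements the control vector (xor with all-ones bytes, of
which vperm only reads the low five bits) and swaps the two data
operands, while on big-endian it is a direct vperm(arg1, arg2, arg3),
which is what the LLVMIR-LE/LLVMIR-BE checks pin down. A minimal
Fortran sketch in the style of these tests, with illustrative names
that are not taken from the patch:

  subroutine vec_perm_demo(arg1, arg2, mask)
    vector(integer(4)) :: arg1, arg2, r
    vector(unsigned(1)) :: mask
    ! LE lowers to vperm(arg2, arg1, xor(mask, -1)); BE to vperm(arg1, arg2, mask)
    r = vec_perm(arg1, arg2, mask)
  end subroutine vec_perm_demo
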
diff --git a/flang/test/Lower/PowerPC/ppc-vec-splat.f90 b/flang/test/Lower/PowerPC/ppc-vec-splat.f90
index e21555781df29a..9d4f7e3d98a3ac 100644
--- a/flang/test/Lower/PowerPC/ppc-vec-splat.f90
+++ b/flang/test/Lower/PowerPC/ppc-vec-splat.f90
@@ -1,4 +1,5 @@
-! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-LE" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-BE" %s
 ! REQUIRES: target=powerpc{{.*}}
 
 !----------------
@@ -11,7 +12,8 @@ subroutine vec_splat_testi8i8(x)
   y = vec_splat(x, 0_1)
 
 ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i8 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i8 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i8 15
 ! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
 ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
@@ -23,7 +25,8 @@ subroutine vec_splat_testi8i16(x)
   y = vec_splat(x, 0_2)
 
 ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 15
 ! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
 ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
@@ -35,7 +38,8 @@ subroutine vec_splat_testi8i32(x)
   y = vec_splat(x, 0_4)
 
 ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i32 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i32 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i32 15
 ! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
 ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
@@ -47,7 +51,8 @@ subroutine vec_splat_testi8i64(x)
   y = vec_splat(x, 0_8)
 
 ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i64 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i64 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i64 15
 ! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
 ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
@@ -59,7 +64,8 @@ subroutine vec_splat_testi16i8(x)
   y = vec_splat(x, 0_1)
 
 ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i8 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i8 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i8 7
 ! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
 ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
@@ -71,7 +77,8 @@ subroutine vec_splat_testi16i16(x)
   y = vec_splat(x, 0_2)
 
 ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i16 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i16 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i16 7
 ! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
 ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
@@ -83,7 +90,8 @@ subroutine vec_splat_testi16i32(x)
   y = vec_splat(x, 0_4)
 
 ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i32 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i32 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i32 7
 ! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
 ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
@@ -95,7 +103,8 @@ subroutine vec_splat_testi16i64(x)
   y = vec_splat(x, 0_8)
 
 ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i64 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i64 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i64 7
 ! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
 ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
@@ -107,7 +116,8 @@ subroutine vec_splat_testi32i8(x)
   y = vec_splat(x, 0_1)
 
 ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i8 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i8 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i8 3
 ! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
 ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
@@ -119,7 +129,8 @@ subroutine vec_splat_testi32i16(x)
   y = vec_splat(x, 0_2)
 
 ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i16 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i16 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i16 3
 ! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
 ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
@@ -131,7 +142,8 @@ subroutine vec_splat_testi32i32(x)
   y = vec_splat(x, 0_4)
 
 ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i32 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i32 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i32 3
 ! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
 ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
@@ -143,7 +155,8 @@ subroutine vec_splat_testi32i64(x)
   y = vec_splat(x, 0_8)
 
 ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i64 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i64 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i64 3
 ! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
 ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
@@ -155,7 +168,8 @@ subroutine vec_splat_testi64i8(x)
   y = vec_splat(x, 0_1)
 
 ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i8 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i8 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i8 1
 ! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
 ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
@@ -167,7 +181,8 @@ subroutine vec_splat_testi64i16(x)
   y = vec_splat(x, 0_2)
 
 ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i16 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i16 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i16 1
 ! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
 ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
@@ -179,7 +194,8 @@ subroutine vec_splat_testi64i32(x)
   y = vec_splat(x, 0_4)
 
 ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i32 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i32 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i32 1
 ! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
 ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
@@ -191,7 +207,8 @@ subroutine vec_splat_testi64i64(x)
   y = vec_splat(x, 0_8)
 
 ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i64 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i64 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i64 1
 ! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
 ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
@@ -203,7 +220,8 @@ subroutine vec_splat_testf32i8(x)
   y = vec_splat(x, 0_1)
 
 ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <4 x float> %[[x]], i8 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i8 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i8 3
 ! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer
 ! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16
@@ -215,7 +233,8 @@ subroutine vec_splat_testf32i16(x)
   y = vec_splat(x, 0_2)
 
 ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <4 x float> %[[x]], i16 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i16 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i16 3
 ! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer
 ! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16
@@ -227,7 +246,8 @@ subroutine vec_splat_testf32i32(x)
   y = vec_splat(x, 0_4)
 
 ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <4 x float> %[[x]], i32 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i32 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i32 3
 ! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer
 ! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16
@@ -239,7 +259,8 @@ subroutine vec_splat_testf32i64(x)
   y = vec_splat(x, 0_8)
 
 ! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <4 x float> %[[x]], i64 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i64 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x float> %[[x]], i64 3
 ! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer
 ! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16
@@ -251,7 +272,8 @@ subroutine vec_splat_testf64i8(x)
   y = vec_splat(x, 0_1)
 
 ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <2 x double> %[[x]], i8 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i8 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i8 1
 ! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer
 ! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16
@@ -263,7 +285,8 @@ subroutine vec_splat_testf64i16(x)
   y = vec_splat(x, 0_2)
 
 ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <2 x double> %[[x]], i16 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i16 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i16 1
 ! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer
 ! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16
@@ -275,7 +298,8 @@ subroutine vec_splat_testf64i32(x)
   y = vec_splat(x, 0_4)
 
 ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <2 x double> %[[x]], i32 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i32 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i32 1
 ! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer
 ! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16
@@ -287,7 +311,8 @@ subroutine vec_splat_testf64i64(x)
   y = vec_splat(x, 0_8)
 
 ! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <2 x double> %[[x]], i64 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i64 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x double> %[[x]], i64 1
 ! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer
 ! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16
@@ -299,7 +324,8 @@ subroutine vec_splat_testu8i8(x)
   y = vec_splat(x, 0_1)
 
 ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i8 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i8 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i8 15
 ! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
 ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
@@ -311,7 +337,8 @@ subroutine vec_splat_testu8i16(x)
   y = vec_splat(x, 0_2)
 
 ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 15
 ! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
 ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
@@ -323,7 +350,8 @@ subroutine vec_splat_testu8i32(x)
   y = vec_splat(x, 0_4)
 
 ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i32 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i32 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i32 15
 ! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
 ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
@@ -335,7 +363,8 @@ subroutine vec_splat_testu8i64(x)
   y = vec_splat(x, 0_8)
 
 ! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i64 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i64 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i64 15
 ! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
 ! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
@@ -347,7 +376,8 @@ subroutine vec_splat_testu16i8(x)
   y = vec_splat(x, 0_1)
 
 ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i8 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i8 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i8 7
 ! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
 ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
@@ -359,7 +389,8 @@ subroutine vec_splat_testu16i16(x)
   y = vec_splat(x, 0_2)
 
 ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i16 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i16 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i16 7
 ! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
 ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
@@ -371,7 +402,8 @@ subroutine vec_splat_testu16i32(x)
   y = vec_splat(x, 0_4)
 
 ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i32 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i32 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i32 7
 ! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
 ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
@@ -383,7 +415,8 @@ subroutine vec_splat_testu16i64(x)
   y = vec_splat(x, 0_8)
 
 ! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i64 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i64 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i64 7
 ! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
 ! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
@@ -395,7 +428,8 @@ subroutine vec_splat_testu32i8(x)
   y = vec_splat(x, 0_1)
 
 ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i8 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i8 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i8 3
 ! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
 ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
@@ -407,7 +441,8 @@ subroutine vec_splat_testu32i16(x)
   y = vec_splat(x, 0_2)
 
 ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i16 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i16 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i16 3
 ! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
 ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
@@ -419,7 +454,8 @@ subroutine vec_splat_testu32i32(x)
   y = vec_splat(x, 0_4)
 
 ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i32 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i32 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i32 3
 ! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
 ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
@@ -431,7 +467,8 @@ subroutine vec_splat_testu32i64(x)
   y = vec_splat(x, 0_8)
 
 ! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i64 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i64 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i64 3
 ! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
 ! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
@@ -443,7 +480,8 @@ subroutine vec_splat_testu64i8(x)
   y = vec_splat(x, 0_1)
 
 ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i8 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i8 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i8 1
 ! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
 ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
@@ -455,7 +493,8 @@ subroutine vec_splat_testu64i16(x)
   y = vec_splat(x, 0_2)
 
 ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i16 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i16 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i16 1
 ! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
 ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
@@ -467,7 +506,8 @@ subroutine vec_splat_testu64i32(x)
   y = vec_splat(x, 0_4)
 
 ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i32 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i32 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i32 1
 ! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
 ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
@@ -479,7 +519,8 @@ subroutine vec_splat_testu64i64(x)
   y = vec_splat(x, 0_8)
 
 ! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
-! LLVMIR: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i64 0
+! LLVMIR-LE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i64 0
+! LLVMIR-BE: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i64 1
 ! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0
 ! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
 ! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
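
Note on the vec_splat hunks: with splat index 0, the little-endian
lowering extracts IR element 0, while the big-endian lowering extracts
the last element (15 for <16 x i8>, 7 for <8 x i16>, 3 for <4 x i32>,
1 for <2 x i64>) before broadcasting it with insertelement plus
shufflevector, so each test needs the split check prefixes. A minimal
sketch in the style of these tests (illustrative names, not from the
patch):

  subroutine vec_splat_demo(x)
    vector(integer(8)) :: x, y
    ! LE extracts element 0 of the <2 x i64>; BE extracts element 1.
    y = vec_splat(x, 0_1)
  end subroutine vec_splat_demo
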


