[flang-commits] [flang] f50eaea - [flang] Add PowerPC vec_splat, vec_splats and vec_splat_s32 intrinsics

Kelvin Li via flang-commits flang-commits at lists.llvm.org
Mon Aug 14 13:23:30 PDT 2023


Author: Kelvin Li
Date: 2023-08-14T16:15:28-04:00
New Revision: f50eaea8ce1342732fdc02d494e077aaafc3b3e4

URL: https://github.com/llvm/llvm-project/commit/f50eaea8ce1342732fdc02d494e077aaafc3b3e4
DIFF: https://github.com/llvm/llvm-project/commit/f50eaea8ce1342732fdc02d494e077aaafc3b3e4.diff

LOG: [flang] Add PowerPC vec_splat, vec_splats and vec_splat_s32 intrinsics

Co-authored-by: Paul Scoropan <1paulscoropan at gmail.com>

Differential Revision: https://reviews.llvm.org/D157728
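
As a rough usage sketch (illustrative only and not part of this patch; the
subroutine and variable names are made up here, assume a PowerPC target, and
follow the conventions of the new lowering tests below):

  subroutine splat_demo(vi)
    vector(integer(4)) :: vi, ri
    vector(real(4)) :: vr
    ri = vec_splats(7)        ! replicate a scalar integer(4) into all 4 lanes
    vr = vec_splats(1.0)      ! likewise for a real(4) scalar
    ri = vec_splat(vi, 2_8)   ! replicate element 2 of vi; the index must be a
                              ! constant expression in 0..3 for 4 elements
    ri = vec_splat_s32(-5)    ! always returns vector(integer(4)); the argument
                              ! must be a constant expression in -16..15
  end subroutine splat_demo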

Added: 
    flang/test/Lower/PowerPC/ppc-vec-splat-elem-order.f90
    flang/test/Lower/PowerPC/ppc-vec-splat.f90

Modified: 
    flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h
    flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
    flang/lib/Semantics/check-call.cpp
    flang/module/__ppc_intrinsics.f90
    flang/test/Semantics/PowerPC/ppc-vector-intrinsics.f90

Removed: 
    


################################################################################
diff --git a/flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h b/flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h
index 964d9726a08e8d..d6116361a6ad7c 100644
--- a/flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h
+++ b/flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h
@@ -42,6 +42,9 @@ enum class VecOp {
   Sldw,
   Sll,
   Slo,
+  Splat,
+  Splat_s32,
+  Splats,
   Sr,
   Srl,
   Sro,
@@ -113,6 +116,15 @@ static inline VecTypeInfo getVecTypeFromFir(mlir::Value firVec) {
   return getVecTypeFromFirType(firVec.getType());
 }
 
+// Calculates the vector length and returns a VecTypeInfo with element type and
+// length.
+static inline VecTypeInfo getVecTypeFromEle(mlir::Value ele) {
+  VecTypeInfo vecTyInfo;
+  vecTyInfo.eleTy = ele.getType();
+  vecTyInfo.len = 16 / (vecTyInfo.eleTy.getIntOrFloatBitWidth() / 8);
+  return vecTyInfo;
+}
+
 // Converts array of fir vectors to mlir vectors.
 static inline llvm::SmallVector<mlir::Value, 4>
 convertVecArgs(fir::FirOpBuilder &builder, mlir::Location loc,
@@ -209,6 +221,10 @@ struct PPCIntrinsicLibrary : IntrinsicLibrary {
 
   template <VecOp>
   void genVecXStore(llvm::ArrayRef<fir::ExtendedValue>);
+
+  template <VecOp vop>
+  fir::ExtendedValue genVecSplat(mlir::Type resultType,
+                                 llvm::ArrayRef<fir::ExtendedValue> args);
 };
 
 const IntrinsicHandler *findPPCIntrinsicHandler(llvm::StringRef name);
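
A note on getVecTypeFromEle above: the PowerPC vector registers are 16 bytes
wide, so the element count is len = 16 / (element bit width / 8). For example,
a 64-bit element gives 16 / (64/8) = 2 lanes and an 8-bit element gives
16 / (8/8) = 16 lanes, matching the vector<2xi64> and vector<16xi8> types seen
in the lowering tests below.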

diff --git a/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp b/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
index c0ba9809da6e7f..708b3a57740365 100644
--- a/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
+++ b/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
@@ -193,6 +193,21 @@ static constexpr IntrinsicHandler ppcHandlers[]{
          &PI::genVecShift<VecOp::Slo>),
      {{{"arg1", asValue}, {"arg2", asValue}}},
      /*isElemental=*/true},
+    {"__ppc_vec_splat",
+     static_cast<IntrinsicLibrary::ExtendedGenerator>(
+         &PI::genVecSplat<VecOp::Splat>),
+     {{{"arg1", asValue}, {"arg2", asValue}}},
+     /*isElemental=*/true},
+    {"__ppc_vec_splat_s32_",
+     static_cast<IntrinsicLibrary::ExtendedGenerator>(
+         &PI::genVecSplat<VecOp::Splat_s32>),
+     {{{"arg1", asValue}}},
+     /*isElemental=*/true},
+    {"__ppc_vec_splats",
+     static_cast<IntrinsicLibrary::ExtendedGenerator>(
+         &PI::genVecSplat<VecOp::Splats>),
+     {{{"arg1", asValue}}},
+     /*isElemental=*/true},
     {"__ppc_vec_sr",
      static_cast<IntrinsicLibrary::ExtendedGenerator>(
          &PI::genVecShift<VecOp::Sr>),
@@ -1608,6 +1623,53 @@ PPCIntrinsicLibrary::genVecShift(mlir::Type resultType,
   return shftRes;
 }
 
+// VEC_SPLAT, VEC_SPLATS, VEC_SPLAT_S32
+template <VecOp vop>
+fir::ExtendedValue
+PPCIntrinsicLibrary::genVecSplat(mlir::Type resultType,
+                                 llvm::ArrayRef<fir::ExtendedValue> args) {
+  auto context{builder.getContext()};
+  auto argBases{getBasesForArgs(args)};
+
+  mlir::vector::SplatOp splatOp{nullptr};
+  mlir::Type retTy{nullptr};
+  switch (vop) {
+  case VecOp::Splat: {
+    assert(args.size() == 2);
+    auto vecTyInfo{getVecTypeFromFir(argBases[0])};
+
+    auto extractOp{genVecExtract(resultType, args)};
+    splatOp = builder.create<mlir::vector::SplatOp>(
+        loc, *(extractOp.getUnboxed()), vecTyInfo.toMlirVectorType(context));
+    retTy = vecTyInfo.toFirVectorType();
+    break;
+  }
+  case VecOp::Splats: {
+    assert(args.size() == 1);
+    auto vecTyInfo{getVecTypeFromEle(argBases[0])};
+
+    splatOp = builder.create<mlir::vector::SplatOp>(
+        loc, argBases[0], vecTyInfo.toMlirVectorType(context));
+    retTy = vecTyInfo.toFirVectorType();
+    break;
+  }
+  case VecOp::Splat_s32: {
+    assert(args.size() == 1);
+    auto eleTy{builder.getIntegerType(32)};
+    auto intOp{builder.createConvert(loc, eleTy, argBases[0])};
+
+    // the intrinsic always returns vector(integer(4))
+    splatOp = builder.create<mlir::vector::SplatOp>(
+        loc, intOp, mlir::VectorType::get(4, eleTy));
+    retTy = fir::VectorType::get(4, eleTy);
+    break;
+  }
+  default:
+    llvm_unreachable("invalid vector operation for generator");
+  }
+  return builder.createConvert(loc, retTy, splatOp);
+}
+
 const char *getMmaIrIntrName(MMAOp mmaOp) {
   switch (mmaOp) {
   case MMAOp::AssembleAcc:
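
In short, the three cases in genVecSplat above lower as follows (see the FIR
checks in the new tests): vec_splat extracts the selected element via
genVecExtract and splats it back to a full vector with vector.splat; vec_splats
splats its scalar argument directly; and vec_splat_s32 converts its constant
argument to i32 and splats it into a 4 x i32 vector before converting the
result back to the corresponding !fir.vector type.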

diff --git a/flang/lib/Semantics/check-call.cpp b/flang/lib/Semantics/check-call.cpp
index 85aa1be81b6179..a2ce064dbadad7 100644
--- a/flang/lib/Semantics/check-call.cpp
+++ b/flang/lib/Semantics/check-call.cpp
@@ -1529,7 +1529,7 @@ bool CheckArgumentIsConstantExprInRange(
 
   if (*scalarValue < lowerBound || *scalarValue > upperBound) {
     messages.Say(
-        "Argument #%d must be a constant expression in range %d-%d"_err_en_US,
+        "Argument #%d must be a constant expression in range %d to %d"_err_en_US,
         index + 1, lowerBound, upperBound);
     return false;
   }
@@ -1560,6 +1560,29 @@ bool CheckPPCIntrinsic(const Symbol &generic, const Symbol &specific,
   if (specific.name().ToString().compare(0, 16, "__ppc_vec_permi_") == 0) {
     return CheckArgumentIsConstantExprInRange(actuals, 2, 0, 3, messages);
   }
+  if (specific.name().ToString().compare(0, 21, "__ppc_vec_splat_s32__") == 0) {
+    return CheckArgumentIsConstantExprInRange(actuals, 0, -16, 15, messages);
+  }
+  if (specific.name().ToString().compare(0, 16, "__ppc_vec_splat_") == 0) {
+    // The value of arg2 in vec_splat must be a constant expression that is
+    // greater than or equal to 0, and less than the number of elements in arg1.
+    auto *expr{actuals[0].value().UnwrapExpr()};
+    auto type{characteristics::TypeAndShape::Characterize(*expr, context)};
+    assert(type && "unknown type");
+    const auto *derived{evaluate::GetDerivedTypeSpec(type.value().type())};
+    if (derived && derived->IsVectorType()) {
+      for (const auto &pair : derived->parameters()) {
+        if (pair.first == "element_kind") {
+          auto vecElemKind{Fortran::evaluate::ToInt64(pair.second.GetExplicit())
+                               .value_or(0)};
+          auto numElem{vecElemKind == 0 ? 0 : (16 / vecElemKind)};
+          return CheckArgumentIsConstantExprInRange(
+              actuals, 1, 0, numElem - 1, messages);
+        }
+      }
+    } else
+      assert(false && "vector type is expected");
+  }
   return false;
 }
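
To illustrate the new vec_splat check (a sketch; compare the updated
ppc-vector-intrinsics.f90 semantics test): a vector(integer(4)) argument has
16/4 = 4 elements, so the splat index must be a constant expression from 0
through 3.

  subroutine splat_index_check(x)
    vector(integer(4)) :: x, y
    y = vec_splat(x, 3_8)   ! ok: 3 is within 0..3
    y = vec_splat(x, 4_8)   ! error: Argument #2 must be a constant expression
                            ! in range 0 to 3
  end subroutine splat_index_check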
 

diff --git a/flang/module/__ppc_intrinsics.f90 b/flang/module/__ppc_intrinsics.f90
index 8b2858c3922c0d..448429b668f955 100644
--- a/flang/module/__ppc_intrinsics.f90
+++ b/flang/module/__ppc_intrinsics.f90
@@ -42,11 +42,30 @@ elemental vector(real(VKIND1)) function elem_func_vr##VKIND1##vr##VKIND2(arg1);
   end function ;
 #define ELEM_FUNC_VRVR(VKIND) ELEM_FUNC_VRVR_2(VKIND, VKIND)
 
+! vector(i) function f(i)
+#define ELEM_FUNC_VII_2(RKIND, VKIND) \
+  elemental vector(integer(RKIND)) function elem_func_vi##RKIND##i##VKIND(arg1); \
+    integer(VKIND), intent(in) :: arg1; \
+  end function ;
+#define ELEM_FUNC_VII(VKIND) ELEM_FUNC_VII_2(VKIND, VKIND)
+
+! vector(r) function f(r)
+#define ELEM_FUNC_VRR(VKIND) \
+  elemental vector(real(VKIND)) function elem_func_vr##VKIND##r##VKIND(arg1); \
+    real(VKIND), intent(in) :: arg1; \
+  end function ;
+
   ELEM_FUNC_VIVI(1) ELEM_FUNC_VIVI(2) ELEM_FUNC_VIVI(4) ELEM_FUNC_VIVI(8)
   ELEM_FUNC_VUVU(1)
   ELEM_FUNC_VRVR_2(4,8) ELEM_FUNC_VRVR_2(8,4)
   ELEM_FUNC_VRVR(4) ELEM_FUNC_VRVR(8)
+  ELEM_FUNC_VII_2(4,1) ELEM_FUNC_VII_2(4,2) ELEM_FUNC_VII_2(4,8)
+  ELEM_FUNC_VII(1) ELEM_FUNC_VII(2) ELEM_FUNC_VII(4) ELEM_FUNC_VII(8)
+  ELEM_FUNC_VRR(4) ELEM_FUNC_VRR(8)
 
+#undef ELEM_FUNC_VRR
+#undef ELEM_FUNC_VII
+#undef ELEM_FUNC_VII_2
 #undef ELEM_FUNC_VRVR
 #undef ELEM_FUNC_VRVR_2
 #undef ELEM_FUNC_VUVU
@@ -150,6 +169,30 @@ elemental real(VKIND) function elem_func_r##VKIND##vr##VKIND##i(arg1, arg2); \
     !dir$ ignore_tkr(k) arg2; \
   end function ;
 
+! vector(i) function f(vector(i), i)
+#define ELEM_FUNC_VIVII0(VKIND) \
+  elemental vector(integer(VKIND)) function elem_func_vi##VKIND##vi##VKIND##i0(arg1, arg2); \
+    vector(integer(VKIND)), intent(in) :: arg1; \
+    integer(8), intent(in) :: arg2; \
+    !dir$ ignore_tkr(k) arg2; \
+  end function ;
+
+! vector(u) function f(vector(u), i)
+#define ELEM_FUNC_VUVUI0(VKIND) \
+  elemental vector(unsigned(VKIND)) function elem_func_vu##VKIND##vu##VKIND##i0(arg1, arg2); \
+    vector(unsigned(VKIND)), intent(in) :: arg1; \
+    integer(8), intent(in) :: arg2; \
+    !dir$ ignore_tkr(k) arg2; \
+  end function ;
+
+! vector(r) function f(vector(r), i)
+#define ELEM_FUNC_VRVRI0(VKIND) \
+  elemental vector(real(VKIND)) function elem_func_vr##VKIND##vr##VKIND##i0(arg1, arg2); \
+    vector(real(VKIND)), intent(in) :: arg1; \
+    integer(8), intent(in) :: arg2; \
+    !dir$ ignore_tkr(k) arg2; \
+  end function ;
+
 ! The following macros are specific for the vec_convert(v, mold) intrinsics as
 ! the argument keywords are different from the other vector intrinsics.
 !
@@ -203,10 +246,16 @@ pure vector(real(VKIND)) function func_vec_convert_vr##VKIND##vi##vr##VKIND(v, m
   ELEM_FUNC_IVRVR(4,4) ELEM_FUNC_IVRVR(4,8)
   ELEM_FUNC_VRVII(4) ELEM_FUNC_VRVII(8)
   ELEM_FUNC_VRVUI(4) ELEM_FUNC_VRVUI(8)
-
+  ELEM_FUNC_VIVII0(1) ELEM_FUNC_VIVII0(2) ELEM_FUNC_VIVII0(4) ELEM_FUNC_VIVII0(8)
+  ELEM_FUNC_VUVUI0(1) ELEM_FUNC_VUVUI0(2) ELEM_FUNC_VUVUI0(4) ELEM_FUNC_VUVUI0(8)
+  ELEM_FUNC_VRVRI0(4) ELEM_FUNC_VRVRI0(8)
+  
 #undef FUNC_VEC_CONVERT_VRVIVR
 #undef FUNC_VEC_CONVERT_VUVIVU
 #undef FUNC_VEC_CONVERT_VIVIVI
+#undef ELEM_FUNC_VRVRI0
+#undef ELEM_FUNC_VUVUI0
+#undef ELEM_FUNC_VIVII0
 #undef ELEM_FUNC_RVRI
 #undef ELEM_FUNC_VRVUI
 #undef ELEM_FUNC_IVII
@@ -618,13 +667,16 @@ end function func_r8r8i
   end interface mtfsfi
   public :: mtfsfi
 
-!-------------------------
-! vector function(vector)
-!-------------------------
+!-----------------------------
+! vector function(vector/i/r)
+!-----------------------------
 #define VI_VI(NAME, VKIND) __ppc_##NAME##_vi##VKIND##vi##VKIND
 #define VU_VU(NAME, VKIND) __ppc_##NAME##_vu##VKIND##vu##VKIND
 #define VR_VR_2(NAME, VKIND1, VKIND2) __ppc_##NAME##_vr##VKIND1##vr##VKIND2
 #define VR_VR(NAME, VKIND) VR_VR_2(NAME, VKIND, VKIND)
+#define VI_I_2(NAME, RKIND, VKIND) __ppc_##NAME##_vi##RKIND##i##VKIND
+#define VI_I(NAME, VKIND) VI_I_2(NAME, VKIND, VKIND)
+#define VR_R(NAME, VKIND) __ppc_##NAME##_vr##VKIND##r##VKIND
 
 #define VEC_VI_VI(NAME, VKIND) \
   procedure(elem_func_vi##VKIND##vi##VKIND) :: VI_VI(NAME, VKIND);
@@ -633,6 +685,11 @@ end function func_r8r8i
 #define VEC_VR_VR_2(NAME, VKIND1, VKIND2) \
   procedure(elem_func_vr##VKIND1##vr##VKIND2) :: VR_VR_2(NAME, VKIND1, VKIND2);
 #define VEC_VR_VR(NAME, VKIND) VEC_VR_VR_2(NAME, VKIND, VKIND)
+#define VEC_VI_I_2(NAME, RKIND, VKIND) \
+  procedure(elem_func_vi##RKIND##i##VKIND) :: VI_I_2(NAME, RKIND, VKIND);
+#define VEC_VI_I(NAME, VKIND) VEC_VI_I_2(NAME, VKIND, VKIND)
+#define VEC_VR_R(NAME, VKIND) \
+  procedure(elem_func_vr##VKIND##r##VKIND) :: VR_R(NAME, VKIND);
 
 ! vec_abs
   VEC_VI_VI(vec_abs,1) VEC_VI_VI(vec_abs,2) VEC_VI_VI(vec_abs,4) VEC_VI_VI(vec_abs,8)
@@ -664,10 +721,32 @@ end function func_r8r8i
   end interface
   public vec_cvspbf16
 
+! vec_splats
+  VEC_VI_I(vec_splats,1) VEC_VI_I(vec_splats,2) VEC_VI_I(vec_splats,4) VEC_VI_I(vec_splats,8)
+  VEC_VR_R(vec_splats,4) VEC_VR_R(vec_splats,8)
+  interface vec_splats
+     procedure :: VI_I(vec_splats,1), VI_I(vec_splats,2), VI_I(vec_splats,4), VI_I(vec_splats,8)
+     procedure :: VR_R(vec_splats,4), VR_R(vec_splats,8)
+  end interface vec_splats
+  public :: vec_splats
+
+! vec_splat_s32
+  VEC_VI_I_2(vec_splat_s32_,4,1) VEC_VI_I_2(vec_splat_s32_,4,2) VEC_VI_I_2(vec_splat_s32_,4,4) VEC_VI_I_2(vec_splat_s32_,4,8)
+  interface vec_splat_s32
+     procedure :: VI_I_2(vec_splat_s32_,4,1), VI_I_2(vec_splat_s32_,4,2), VI_I_2(vec_splat_s32_,4,4), VI_I_2(vec_splat_s32_,4,8)
+  end interface vec_splat_s32
+  public :: vec_splat_s32
+
+#undef VEC_VR_R
+#undef VEC_VI_I
+#undef VEC_VI_I_2
 #undef VEC_VR_VR
 #undef VEC_VR_VR_2
 #undef VEC_VU_VU
 #undef VEC_VI_VI
+#undef VR_R
+#undef VI_I
+#undef VI_I_2
 #undef VR_VR
 #undef VR_VR_2
 #undef VU_VU
@@ -1220,11 +1299,20 @@ end function func_r8r8i
 ! the `ignore_tkr' directive.
 #define VR_VI_I(NAME, VKIND) __ppc_##NAME##_vr##VKIND##vi##VKIND##i0
 #define VR_VU_I(NAME, VKIND) __ppc_##NAME##_vr##VKIND##vu##VKIND##i0
+#define VI_VI_I0(NAME, VKIND) __ppc_##NAME##_vi##VKIND##vi##VKIND##i0
+#define VU_VU_I0(NAME, VKIND) __ppc_##NAME##_vu##VKIND##vu##VKIND##i0
+#define VR_VR_I0(NAME, VKIND) __ppc_##NAME##_vr##VKIND##vr##VKIND##i0
 
 #define VEC_VR_VI_I(NAME, VKIND) \
   procedure(elem_func_vr##VKIND##vi##VKIND##i) :: VR_VI_I(NAME, VKIND);
 #define VEC_VR_VU_I(NAME, VKIND) \
   procedure(elem_func_vr##VKIND##vu##VKIND##i) :: VR_VU_I(NAME, VKIND);
+#define VEC_VI_VI_I0(NAME, VKIND) \
+  procedure(elem_func_vi##VKIND##vi##VKIND##i0) :: VI_VI_I0(NAME, VKIND);
+#define VEC_VU_VU_I0(NAME, VKIND) \
+  procedure(elem_func_vu##VKIND##vu##VKIND##i0) :: VU_VU_I0(NAME, VKIND);
+#define VEC_VR_VR_I0(NAME, VKIND) \
+  procedure(elem_func_vr##VKIND##vr##VKIND##i0) :: VR_VR_I0(NAME, VKIND);
 
 ! vec_ctf
   VEC_VR_VI_I(vec_ctf,4) VEC_VR_VI_I(vec_ctf,8)
@@ -1235,8 +1323,25 @@ end function func_r8r8i
   end interface vec_ctf
   public :: vec_ctf
 
+! vec_splat
+  VEC_VI_VI_I0(vec_splat,1) VEC_VI_VI_I0(vec_splat,2) VEC_VI_VI_I0(vec_splat,4) VEC_VI_VI_I0(vec_splat,8)
+  VEC_VU_VU_I0(vec_splat,1) VEC_VU_VU_I0(vec_splat,2) VEC_VU_VU_I0(vec_splat,4) VEC_VU_VU_I0(vec_splat,8)
+  VEC_VR_VR_I0(vec_splat,4) VEC_VR_VR_I0(vec_splat,8)
+  interface vec_splat
+     procedure :: VI_VI_I0(vec_splat,1), VI_VI_I0(vec_splat,2), VI_VI_I0(vec_splat,4), VI_VI_I0(vec_splat,8)
+     procedure :: VU_VU_I0(vec_splat,1), VU_VU_I0(vec_splat,2), VU_VU_I0(vec_splat,4), VU_VU_I0(vec_splat,8)
+     procedure :: VR_VR_I0(vec_splat,4), VR_VR_I0(vec_splat,8)
+  end interface vec_splat
+  public :: vec_splat
+
+#undef VEC_VR_VR_I0
+#undef VEC_VU_VU_I0
+#undef VEC_VI_VI_I0
 #undef VEC_VR_VU_I
 #undef VEC_VR_VI_I
+#undef VR_VR_I0
+#undef VU_VU_I0
+#undef VI_VI_I0
 #undef VR_VU_I
 #undef VR_VI_I
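
A note on the naming above: VI_I_2(vec_splat_s32_,4,8), for instance, expands
to the specific name __ppc_vec_splat_s32__vi4i8. That name starts with both the
21-character prefix "__ppc_vec_splat_s32__" and the shorter "__ppc_vec_splat_"
prefix, which is why the check-call.cpp change above tests the vec_splat_s32
prefix before the vec_splat one.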
 

diff --git a/flang/test/Lower/PowerPC/ppc-vec-splat-elem-order.f90 b/flang/test/Lower/PowerPC/ppc-vec-splat-elem-order.f90
new file mode 100644
index 00000000000000..f5145870d66966
--- /dev/null
+++ b/flang/test/Lower/PowerPC/ppc-vec-splat-elem-order.f90
@@ -0,0 +1,49 @@
+! RUN: bbc -emit-fir %s -fno-ppc-native-vector-element-order -o - | FileCheck --check-prefixes="FIR" %s
+! RUN: %flang_fc1 -emit-llvm %s -fno-ppc-native-vector-element-order -o - | FileCheck --check-prefixes="LLVMIR" %s
+! REQUIRES: target=powerpc{{.*}}
+
+! CHECK-LABEL: vec_splat_testf32i64
+subroutine vec_splat_testf32i64(x)
+  vector(real(4)) :: x, y
+  y = vec_splat(x, 0_8)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<4:f32>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i64
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! FIR: %[[c:.*]] = arith.constant 4 : i64
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! FIR: %[[c2:.*]] = arith.constant 3 : i64
+! FIR: %[[sub:.*]] = llvm.sub %[[c2]], %[[u]]  : i64
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[sub]] : i64] : vector<4xf32>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<4xf32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xf32>) -> !fir.vector<4:f32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:f32>>
+
+! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <4 x float> %[[x]], i64 3
+! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer
+! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testf32i64
+
+! CHECK-LABEL: vec_splat_testu8i16
+subroutine vec_splat_testu8i16(x)
+  vector(unsigned(1)) :: x, y
+  y = vec_splat(x, 0_2)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<16:ui8>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i16
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! FIR: %[[c:.*]] = arith.constant 16 : i16
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! FIR: %[[c2:.*]] = arith.constant 15 : i16
+! FIR: %[[sub:.*]] = llvm.sub %[[c2]], %[[u]]  : i16
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[sub]] : i16] : vector<16xi8>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<16xi8>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<16:ui8>>
+
+! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 15
+! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
+! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu8i16
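
With -fno-ppc-native-vector-element-order (the test above), the element index
is reversed before the extract: for a vector of len elements the lowered index
is (len - 1) - (index mod len). That is why vec_splat(x, 0_8) on a 4-element
real(4) vector extracts element 3, and vec_splat(x, 0_2) on a 16-element
unsigned(1) vector extracts element 15, in the LLVMIR checks.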

diff --git a/flang/test/Lower/PowerPC/ppc-vec-splat.f90 b/flang/test/Lower/PowerPC/ppc-vec-splat.f90
new file mode 100644
index 00000000000000..92ec128f17b509
--- /dev/null
+++ b/flang/test/Lower/PowerPC/ppc-vec-splat.f90
@@ -0,0 +1,1509 @@
+! RUN: bbc -emit-fir %s -o - | FileCheck --check-prefixes="FIR" %s
+! RUN: %flang_fc1 -emit-fir %s -o - | fir-opt --fir-to-llvm-ir | FileCheck --check-prefixes="MLIR" %s
+! RUN: %flang_fc1 -emit-llvm %s -o - | FileCheck --check-prefixes="CHECK" %s
+! REQUIRES: target=powerpc{{.*}}
+
+!----------------
+! vec_splat
+!----------------
+
+! CHECK-LABEL: vec_splat_testi8i8
+subroutine vec_splat_testi8i8(x)
+  vector(integer(1)) :: x, y
+  y = vec_splat(x, 0_1)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<16:i8>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i8
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! FIR: %[[c:.*]] = arith.constant 16 : i8
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i8] : vector<16xi8>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<16xi8>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<16xi8>) -> !fir.vector<16:i8>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<16:i8>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<16xi8>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i8) : i8
+! MLIR: %[[c:.*]] = llvm.mlir.constant(16 : i8) : i8
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i8] : vector<16xi8>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<16xi8>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<16xi8>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<16xi8>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<16xi8>>
+
+! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i8 0
+! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
+! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testi8i8
+
+! CHECK-LABEL: vec_splat_testi8i16
+subroutine vec_splat_testi8i16(x)
+  vector(integer(1)) :: x, y
+  y = vec_splat(x, 0_2)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<16:i8>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i16
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! FIR: %[[c:.*]] = arith.constant 16 : i16
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i16] : vector<16xi8>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<16xi8>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<16xi8>) -> !fir.vector<16:i8>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<16:i8>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<16xi8>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i16) : i16
+! MLIR: %[[c:.*]] = llvm.mlir.constant(16 : i16) : i16
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i16] : vector<16xi8>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<16xi8>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<16xi8>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<16xi8>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<16xi8>>
+
+! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 0
+! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
+! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testi8i16
+
+! CHECK-LABEL: vec_splat_testi8i32
+subroutine vec_splat_testi8i32(x)
+  vector(integer(1)) :: x, y
+  y = vec_splat(x, 0_4)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<16:i8>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i32
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! FIR: %[[c:.*]] = arith.constant 16 : i32
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i32] : vector<16xi8>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<16xi8>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<16xi8>) -> !fir.vector<16:i8>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<16:i8>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<16xi8>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[c:.*]] = llvm.mlir.constant(16 : i32) : i32
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i32] : vector<16xi8>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<16xi8>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<16xi8>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<16xi8>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<16xi8>>
+
+! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i32 0
+! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
+! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testi8i32
+
+! CHECK-LABEL: vec_splat_testi8i64
+subroutine vec_splat_testi8i64(x)
+  vector(integer(1)) :: x, y
+  y = vec_splat(x, 0_8)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<16:i8>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i64
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! FIR: %[[c:.*]] = arith.constant 16 : i64
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i64] : vector<16xi8>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<16xi8>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<16xi8>) -> !fir.vector<16:i8>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<16:i8>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<16xi8>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i64) : i64
+! MLIR: %[[c:.*]] = llvm.mlir.constant(16 : i64) : i64
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i64] : vector<16xi8>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<16xi8>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<16xi8>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<16xi8>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<16xi8>>
+
+! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i64 0
+! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
+! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testi8i64
+
+! CHECK-LABEL: vec_splat_testi16i8
+subroutine vec_splat_testi16i8(x)
+  vector(integer(2)) :: x, y
+  y = vec_splat(x, 0_1)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<8:i16>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i8
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! FIR: %[[c:.*]] = arith.constant 8 : i8
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i8] : vector<8xi16>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<8xi16>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<8xi16>) -> !fir.vector<8:i16>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<8:i16>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<8xi16>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i8) : i8
+! MLIR: %[[c:.*]] = llvm.mlir.constant(8 : i8) : i8
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i8] : vector<8xi16>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<8xi16>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<8xi16>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0] : vector<8xi16>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<8xi16>>
+
+! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i8 0
+! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
+! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testi16i8
+
+! CHECK-LABEL: vec_splat_testi16i16
+subroutine vec_splat_testi16i16(x)
+  vector(integer(2)) :: x, y
+  y = vec_splat(x, 0_2)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<8:i16>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i16
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! FIR: %[[c:.*]] = arith.constant 8 : i16
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i16] : vector<8xi16>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<8xi16>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<8xi16>) -> !fir.vector<8:i16>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<8:i16>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<8xi16>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i16) : i16
+! MLIR: %[[c:.*]] = llvm.mlir.constant(8 : i16) : i16
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i16] : vector<8xi16>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<8xi16>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<8xi16>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0] : vector<8xi16>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<8xi16>>
+
+! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i16 0
+! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
+! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testi16i16
+
+! CHECK-LABEL: vec_splat_testi16i32
+subroutine vec_splat_testi16i32(x)
+  vector(integer(2)) :: x, y
+  y = vec_splat(x, 0_4)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<8:i16>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i32
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! FIR: %[[c:.*]] = arith.constant 8 : i32
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i32] : vector<8xi16>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<8xi16>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<8xi16>) -> !fir.vector<8:i16>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<8:i16>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<8xi16>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[c:.*]] = llvm.mlir.constant(8 : i32) : i32
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i32] : vector<8xi16>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<8xi16>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<8xi16>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0] : vector<8xi16>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<8xi16>>
+
+! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i32 0
+! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
+! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testi16i32
+
+! CHECK-LABEL: vec_splat_testi16i64
+subroutine vec_splat_testi16i64(x)
+  vector(integer(2)) :: x, y
+  y = vec_splat(x, 0_8)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<8:i16>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i64
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! FIR: %[[c:.*]] = arith.constant 8 : i64
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i64] : vector<8xi16>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<8xi16>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<8xi16>) -> !fir.vector<8:i16>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<8:i16>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<8xi16>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i64) : i64
+! MLIR: %[[c:.*]] = llvm.mlir.constant(8 : i64) : i64
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i64] : vector<8xi16>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<8xi16>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<8xi16>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0] : vector<8xi16>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<8xi16>>
+
+! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i64 0
+! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
+! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testi16i64
+
+! CHECK-LABEL: vec_splat_testi32i8
+subroutine vec_splat_testi32i8(x)
+  vector(integer(4)) :: x, y
+  y = vec_splat(x, 0_1)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<4:i32>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i8
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! FIR: %[[c:.*]] = arith.constant 4 : i8
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i8] : vector<4xi32>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<4xi32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:i32>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<4xi32>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i8) : i8
+! MLIR: %[[c:.*]] = llvm.mlir.constant(4 : i8) : i8
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i8] : vector<4xi32>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<4xi32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<4xi32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0] : vector<4xi32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xi32>>
+
+! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i8 0
+! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
+! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testi32i8
+
+! CHECK-LABEL: vec_splat_testi32i16
+subroutine vec_splat_testi32i16(x)
+  vector(integer(4)) :: x, y
+  y = vec_splat(x, 0_2)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<4:i32>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i16
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! FIR: %[[c:.*]] = arith.constant 4 : i16
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i16] : vector<4xi32>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<4xi32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:i32>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<4xi32>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i16) : i16
+! MLIR: %[[c:.*]] = llvm.mlir.constant(4 : i16) : i16
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i16] : vector<4xi32>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<4xi32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<4xi32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0] : vector<4xi32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xi32>>
+
+! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i16 0
+! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
+! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testi32i16
+
+! CHECK-LABEL: vec_splat_testi32i32
+subroutine vec_splat_testi32i32(x)
+  vector(integer(4)) :: x, y
+  y = vec_splat(x, 0_4)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<4:i32>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i32
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! FIR: %[[c:.*]] = arith.constant 4 : i32
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i32] : vector<4xi32>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<4xi32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:i32>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<4xi32>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[c:.*]] = llvm.mlir.constant(4 : i32) : i32
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i32] : vector<4xi32>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<4xi32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<4xi32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0] : vector<4xi32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xi32>>
+
+! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i32 0
+! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
+! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testi32i32
+
+! CHECK-LABEL: vec_splat_testi32i64
+subroutine vec_splat_testi32i64(x)
+  vector(integer(4)) :: x, y
+  y = vec_splat(x, 0_8)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<4:i32>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i64
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! FIR: %[[c:.*]] = arith.constant 4 : i64
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i64] : vector<4xi32>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<4xi32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:i32>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<4xi32>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i64) : i64
+! MLIR: %[[c:.*]] = llvm.mlir.constant(4 : i64) : i64
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i64] : vector<4xi32>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<4xi32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<4xi32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0] : vector<4xi32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xi32>>
+
+! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i64 0
+! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
+! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testi32i64
+
+! CHECK-LABEL: vec_splat_testi64i8
+subroutine vec_splat_testi64i8(x)
+  vector(integer(8)) :: x, y
+  y = vec_splat(x, 0_1)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<2:i64>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i8
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<2:i64>) -> vector<2xi64>
+! FIR: %[[c:.*]] = arith.constant 2 : i8
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i8] : vector<2xi64>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<2xi64>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<2xi64>) -> !fir.vector<2:i64>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<2:i64>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<2xi64>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i8) : i8
+! MLIR: %[[c:.*]] = llvm.mlir.constant(2 : i8) : i8
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i8] : vector<2xi64>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<2xi64>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<2xi64>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0] : vector<2xi64>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<2xi64>>
+
+! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i8 0
+! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
+! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testi64i8
+
+! CHECK-LABEL: vec_splat_testi64i16
+subroutine vec_splat_testi64i16(x)
+  vector(integer(8)) :: x, y
+  y = vec_splat(x, 0_2)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<2:i64>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i16
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<2:i64>) -> vector<2xi64>
+! FIR: %[[c:.*]] = arith.constant 2 : i16
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i16] : vector<2xi64>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<2xi64>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<2xi64>) -> !fir.vector<2:i64>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<2:i64>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<2xi64>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i16) : i16
+! MLIR: %[[c:.*]] = llvm.mlir.constant(2 : i16) : i16
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i16] : vector<2xi64>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<2xi64>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<2xi64>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0] : vector<2xi64>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<2xi64>>
+
+! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i16 0
+! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
+! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testi64i16
+
+! CHECK-LABEL: vec_splat_testi64i32
+subroutine vec_splat_testi64i32(x)
+  vector(integer(8)) :: x, y
+  y = vec_splat(x, 0_4)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<2:i64>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i32
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<2:i64>) -> vector<2xi64>
+! FIR: %[[c:.*]] = arith.constant 2 : i32
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i32] : vector<2xi64>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<2xi64>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<2xi64>) -> !fir.vector<2:i64>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<2:i64>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<2xi64>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[c:.*]] = llvm.mlir.constant(2 : i32) : i32
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i32] : vector<2xi64>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<2xi64>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<2xi64>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0] : vector<2xi64>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<2xi64>>
+
+! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i32 0
+! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
+! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testi64i32
+
+! CHECK-LABEL: vec_splat_testi64i64
+subroutine vec_splat_testi64i64(x)
+  vector(integer(8)) :: x, y
+  y = vec_splat(x, 0_8)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<2:i64>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i64
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<2:i64>) -> vector<2xi64>
+! FIR: %[[c:.*]] = arith.constant 2 : i64
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i64] : vector<2xi64>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<2xi64>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<2xi64>) -> !fir.vector<2:i64>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<2:i64>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<2xi64>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i64) : i64
+! MLIR: %[[c:.*]] = llvm.mlir.constant(2 : i64) : i64
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i64] : vector<2xi64>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<2xi64>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<2xi64>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0] : vector<2xi64>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<2xi64>>
+
+! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i64 0
+! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
+! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testi64i64
+
+! CHECK-LABEL: vec_splat_testf32i8
+subroutine vec_splat_testf32i8(x)
+  vector(real(4)) :: x, y
+  y = vec_splat(x, 0_1)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<4:f32>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i8
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! FIR: %[[c:.*]] = arith.constant 4 : i8
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i8] : vector<4xf32>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<4xf32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xf32>) -> !fir.vector<4:f32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:f32>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<4xf32>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i8) : i8
+! MLIR: %[[c:.*]] = llvm.mlir.constant(4 : i8) : i8
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i8] : vector<4xf32>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<4xf32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<4xf32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0] : vector<4xf32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xf32>>
+
+! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <4 x float> %[[x]], i8 0
+! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer
+! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testf32i8
+
+! CHECK-LABEL: vec_splat_testf32i16
+subroutine vec_splat_testf32i16(x)
+  vector(real(4)) :: x, y
+  y = vec_splat(x, 0_2)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<4:f32>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i16
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! FIR: %[[c:.*]] = arith.constant 4 : i16
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i16] : vector<4xf32>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<4xf32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xf32>) -> !fir.vector<4:f32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:f32>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<4xf32>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i16) : i16
+! MLIR: %[[c:.*]] = llvm.mlir.constant(4 : i16) : i16
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i16] : vector<4xf32>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<4xf32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<4xf32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0] : vector<4xf32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xf32>>
+
+! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <4 x float> %[[x]], i16 0
+! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer
+! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testf32i16
+
+! CHECK-LABEL: vec_splat_testf32i32
+subroutine vec_splat_testf32i32(x)
+  vector(real(4)) :: x, y
+  y = vec_splat(x, 0_4)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<4:f32>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i32
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! FIR: %[[c:.*]] = arith.constant 4 : i32
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i32] : vector<4xf32>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<4xf32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xf32>) -> !fir.vector<4:f32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:f32>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<4xf32>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[c:.*]] = llvm.mlir.constant(4 : i32) : i32
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i32] : vector<4xf32>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<4xf32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<4xf32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0] : vector<4xf32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xf32>>
+
+! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <4 x float> %[[x]], i32 0
+! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer
+! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testf32i32
+
+! CHECK-LABEL: vec_splat_testf32i64
+subroutine vec_splat_testf32i64(x)
+  vector(real(4)) :: x, y
+  y = vec_splat(x, 0_8)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<4:f32>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i64
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! FIR: %[[c:.*]] = arith.constant 4 : i64
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i64] : vector<4xf32>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<4xf32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xf32>) -> !fir.vector<4:f32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:f32>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<4xf32>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i64) : i64
+! MLIR: %[[c:.*]] = llvm.mlir.constant(4 : i64) : i64
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i64] : vector<4xf32>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<4xf32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<4xf32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0] : vector<4xf32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xf32>>
+
+! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <4 x float> %[[x]], i64 0
+! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer
+! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testf32i64
+
+! CHECK-LABEL: vec_splat_testf64i8
+subroutine vec_splat_testf64i8(x)
+  vector(real(8)) :: x, y
+  y = vec_splat(x, 0_1)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<2:f64>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i8
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<2:f64>) -> vector<2xf64>
+! FIR: %[[c:.*]] = arith.constant 2 : i8
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i8] : vector<2xf64>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<2xf64>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<2xf64>) -> !fir.vector<2:f64>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<2:f64>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<2xf64>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i8) : i8
+! MLIR: %[[c:.*]] = llvm.mlir.constant(2 : i8) : i8
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i8] : vector<2xf64>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<2xf64>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<2xf64>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0] : vector<2xf64>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<2xf64>>
+
+! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <2 x double> %[[x]], i8 0
+! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer
+! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testf64i8
+
+! CHECK-LABEL: vec_splat_testf64i16
+subroutine vec_splat_testf64i16(x)
+  vector(real(8)) :: x, y
+  y = vec_splat(x, 0_2)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<2:f64>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i16
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<2:f64>) -> vector<2xf64>
+! FIR: %[[c:.*]] = arith.constant 2 : i16
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i16] : vector<2xf64>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<2xf64>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<2xf64>) -> !fir.vector<2:f64>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<2:f64>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<2xf64>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i16) : i16
+! MLIR: %[[c:.*]] = llvm.mlir.constant(2 : i16) : i16
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i16] : vector<2xf64>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<2xf64>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<2xf64>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0] : vector<2xf64>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<2xf64>>
+
+! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <2 x double> %[[x]], i16 0
+! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer
+! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testf64i16
+
+! CHECK-LABEL: vec_splat_testf64i32
+subroutine vec_splat_testf64i32(x)
+  vector(real(8)) :: x, y
+  y = vec_splat(x, 0_4)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<2:f64>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i32
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<2:f64>) -> vector<2xf64>
+! FIR: %[[c:.*]] = arith.constant 2 : i32
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i32] : vector<2xf64>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<2xf64>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<2xf64>) -> !fir.vector<2:f64>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<2:f64>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<2xf64>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[c:.*]] = llvm.mlir.constant(2 : i32) : i32
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i32] : vector<2xf64>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<2xf64>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<2xf64>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0] : vector<2xf64>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<2xf64>>
+
+! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <2 x double> %[[x]], i32 0
+! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer
+! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testf64i32
+
+! CHECK-LABEL: vec_splat_testf64i64
+subroutine vec_splat_testf64i64(x)
+  vector(real(8)) :: x, y
+  y = vec_splat(x, 0_8)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<2:f64>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i64
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<2:f64>) -> vector<2xf64>
+! FIR: %[[c:.*]] = arith.constant 2 : i64
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i64] : vector<2xf64>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<2xf64>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<2xf64>) -> !fir.vector<2:f64>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<2:f64>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<2xf64>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i64) : i64
+! MLIR: %[[c:.*]] = llvm.mlir.constant(2 : i64) : i64
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i64] : vector<2xf64>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<2xf64>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<2xf64>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0] : vector<2xf64>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<2xf64>>
+
+! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <2 x double> %[[x]], i64 0
+! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer
+! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testf64i64
+
+! CHECK-LABEL: vec_splat_testu8i8
+subroutine vec_splat_testu8i8(x)
+  vector(unsigned(1)) :: x, y
+  y = vec_splat(x, 0_1)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<16:ui8>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i8
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! FIR: %[[c:.*]] = arith.constant 16 : i8
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i8] : vector<16xi8>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<16xi8>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<16:ui8>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<16xi8>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i8) : i8
+! MLIR: %[[c:.*]] = llvm.mlir.constant(16 : i8) : i8
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i8] : vector<16xi8>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<16xi8>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<16xi8>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<16xi8>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<16xi8>>
+
+! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i8 0
+! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
+! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu8i8
+
+! CHECK-LABEL: vec_splat_testu8i16
+subroutine vec_splat_testu8i16(x)
+  vector(unsigned(1)) :: x, y
+  y = vec_splat(x, 0_2)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<16:ui8>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i16
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! FIR: %[[c:.*]] = arith.constant 16 : i16
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i16] : vector<16xi8>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<16xi8>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<16:ui8>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<16xi8>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i16) : i16
+! MLIR: %[[c:.*]] = llvm.mlir.constant(16 : i16) : i16
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i16] : vector<16xi8>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<16xi8>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<16xi8>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<16xi8>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<16xi8>>
+
+! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i16 0
+! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
+! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu8i16
+
+! CHECK-LABEL: vec_splat_testu8i32
+subroutine vec_splat_testu8i32(x)
+  vector(unsigned(1)) :: x, y
+  y = vec_splat(x, 0_4)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<16:ui8>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i32
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! FIR: %[[c:.*]] = arith.constant 16 : i32
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i32] : vector<16xi8>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<16xi8>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<16:ui8>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<16xi8>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[c:.*]] = llvm.mlir.constant(16 : i32) : i32
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i32] : vector<16xi8>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<16xi8>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<16xi8>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<16xi8>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<16xi8>>
+
+! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i32 0
+! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
+! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu8i32
+
+! CHECK-LABEL: vec_splat_testu8i64
+subroutine vec_splat_testu8i64(x)
+  vector(unsigned(1)) :: x, y
+  y = vec_splat(x, 0_8)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<16:ui8>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i64
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! FIR: %[[c:.*]] = arith.constant 16 : i64
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i64] : vector<16xi8>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<16xi8>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<16:ui8>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<16xi8>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i64) : i64
+! MLIR: %[[c:.*]] = llvm.mlir.constant(16 : i64) : i64
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i64] : vector<16xi8>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<16xi8>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<16xi8>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<16xi8>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<16xi8>>
+
+! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <16 x i8> %[[x]], i64 0
+! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
+! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu8i64
+
+! CHECK-LABEL: vec_splat_testu16i8
+subroutine vec_splat_testu16i8(x)
+  vector(unsigned(2)) :: x, y
+  y = vec_splat(x, 0_1)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<8:ui16>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i8
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! FIR: %[[c:.*]] = arith.constant 8 : i8
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i8] : vector<8xi16>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<8xi16>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<8:ui16>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<8xi16>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i8) : i8
+! MLIR: %[[c:.*]] = llvm.mlir.constant(8 : i8) : i8
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i8] : vector<8xi16>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<8xi16>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<8xi16>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0] : vector<8xi16>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<8xi16>>
+
+! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i8 0
+! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
+! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu16i8
+
+! CHECK-LABEL: vec_splat_testu16i16
+subroutine vec_splat_testu16i16(x)
+  vector(unsigned(2)) :: x, y
+  y = vec_splat(x, 0_2)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<8:ui16>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i16
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! FIR: %[[c:.*]] = arith.constant 8 : i16
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i16] : vector<8xi16>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<8xi16>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<8:ui16>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<8xi16>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i16) : i16
+! MLIR: %[[c:.*]] = llvm.mlir.constant(8 : i16) : i16
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i16] : vector<8xi16>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<8xi16>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<8xi16>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0] : vector<8xi16>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<8xi16>>
+
+! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i16 0
+! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
+! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu16i16
+
+! CHECK-LABEL: vec_splat_testu16i32
+subroutine vec_splat_testu16i32(x)
+  vector(unsigned(2)) :: x, y
+  y = vec_splat(x, 0_4)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<8:ui16>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i32
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! FIR: %[[c:.*]] = arith.constant 8 : i32
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i32] : vector<8xi16>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<8xi16>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<8:ui16>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<8xi16>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[c:.*]] = llvm.mlir.constant(8 : i32) : i32
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i32] : vector<8xi16>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<8xi16>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<8xi16>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0] : vector<8xi16>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<8xi16>>
+
+! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i32 0
+! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
+! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu16i32
+
+! CHECK-LABEL: vec_splat_testu16i64
+subroutine vec_splat_testu16i64(x)
+  vector(unsigned(2)) :: x, y
+  y = vec_splat(x, 0_8)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<8:ui16>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i64
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! FIR: %[[c:.*]] = arith.constant 8 : i64
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i64] : vector<8xi16>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<8xi16>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<8:ui16>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<8xi16>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i64) : i64
+! MLIR: %[[c:.*]] = llvm.mlir.constant(8 : i64) : i64
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i64] : vector<8xi16>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<8xi16>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<8xi16>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0] : vector<8xi16>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<8xi16>>
+
+! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <8 x i16> %[[x]], i64 0
+! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
+! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu16i64
+
+! CHECK-LABEL: vec_splat_testu32i8
+subroutine vec_splat_testu32i8(x)
+  vector(unsigned(4)) :: x, y
+  y = vec_splat(x, 0_1)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<4:ui32>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i8
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! FIR: %[[c:.*]] = arith.constant 4 : i8
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i8] : vector<4xi32>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<4xi32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:ui32>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<4xi32>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i8) : i8
+! MLIR: %[[c:.*]] = llvm.mlir.constant(4 : i8) : i8
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i8] : vector<4xi32>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<4xi32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<4xi32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0] : vector<4xi32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xi32>>
+
+! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i8 0
+! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
+! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu32i8
+
+! CHECK-LABEL: vec_splat_testu32i16
+subroutine vec_splat_testu32i16(x)
+  vector(unsigned(4)) :: x, y
+  y = vec_splat(x, 0_2)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<4:ui32>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i16
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! FIR: %[[c:.*]] = arith.constant 4 : i16
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i16] : vector<4xi32>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<4xi32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:ui32>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<4xi32>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i16) : i16
+! MLIR: %[[c:.*]] = llvm.mlir.constant(4 : i16) : i16
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i16] : vector<4xi32>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<4xi32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<4xi32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0] : vector<4xi32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xi32>>
+
+! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i16 0
+! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
+! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu32i16
+
+! CHECK-LABEL: vec_splat_testu32i32
+subroutine vec_splat_testu32i32(x)
+  vector(unsigned(4)) :: x, y
+  y = vec_splat(x, 0_4)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<4:ui32>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i32
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! FIR: %[[c:.*]] = arith.constant 4 : i32
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i32] : vector<4xi32>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<4xi32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:ui32>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<4xi32>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[c:.*]] = llvm.mlir.constant(4 : i32) : i32
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i32] : vector<4xi32>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<4xi32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<4xi32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0] : vector<4xi32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xi32>>
+
+! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i32 0
+! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
+! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu32i32
+
+! CHECK-LABEL: vec_splat_testu32i64
+subroutine vec_splat_testu32i64(x)
+  vector(unsigned(4)) :: x, y
+  y = vec_splat(x, 0_8)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<4:ui32>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i64
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! FIR: %[[c:.*]] = arith.constant 4 : i64
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i64] : vector<4xi32>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<4xi32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:ui32>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<4xi32>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i64) : i64
+! MLIR: %[[c:.*]] = llvm.mlir.constant(4 : i64) : i64
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i64] : vector<4xi32>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<4xi32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<4xi32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0] : vector<4xi32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xi32>>
+
+! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <4 x i32> %[[x]], i64 0
+! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
+! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu32i64
+
+! CHECK-LABEL: vec_splat_testu64i8
+subroutine vec_splat_testu64i8(x)
+  vector(unsigned(8)) :: x, y
+  y = vec_splat(x, 0_1)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<2:ui64>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i8
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! FIR: %[[c:.*]] = arith.constant 2 : i8
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i8] : vector<2xi64>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<2xi64>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<2xi64>) -> !fir.vector<2:ui64>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<2:ui64>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<2xi64>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i8) : i8
+! MLIR: %[[c:.*]] = llvm.mlir.constant(2 : i8) : i8
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i8
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i8] : vector<2xi64>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<2xi64>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<2xi64>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0] : vector<2xi64>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<2xi64>>
+
+! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i8 0
+! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
+! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu64i8
+
+! CHECK-LABEL: vec_splat_testu64i16
+subroutine vec_splat_testu64i16(x)
+  vector(unsigned(8)) :: x, y
+  y = vec_splat(x, 0_2)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<2:ui64>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i16
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! FIR: %[[c:.*]] = arith.constant 2 : i16
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i16] : vector<2xi64>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<2xi64>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<2xi64>) -> !fir.vector<2:ui64>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<2:ui64>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<2xi64>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i16) : i16
+! MLIR: %[[c:.*]] = llvm.mlir.constant(2 : i16) : i16
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i16
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i16] : vector<2xi64>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<2xi64>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<2xi64>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0] : vector<2xi64>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<2xi64>>
+
+! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i16 0
+! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
+! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu64i16
+
+! CHECK-LABEL: vec_splat_testu64i32
+subroutine vec_splat_testu64i32(x)
+  vector(unsigned(8)) :: x, y
+  y = vec_splat(x, 0_4)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<2:ui64>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i32
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! FIR: %[[c:.*]] = arith.constant 2 : i32
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i32] : vector<2xi64>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<2xi64>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<2xi64>) -> !fir.vector<2:ui64>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<2:ui64>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<2xi64>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[c:.*]] = llvm.mlir.constant(2 : i32) : i32
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i32
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i32] : vector<2xi64>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<2xi64>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<2xi64>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0] : vector<2xi64>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<2xi64>>
+
+! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i32 0
+! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
+! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu64i32
+
+! CHECK-LABEL: vec_splat_testu64i64
+subroutine vec_splat_testu64i64(x)
+  vector(unsigned(8)) :: x, y
+  y = vec_splat(x, 0_8)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<!fir.vector<2:ui64>>
+! FIR: %[[idx:.*]] = arith.constant 0 : i64
+! FIR: %[[vx:.*]] = fir.convert %[[x]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! FIR: %[[c:.*]] = arith.constant 2 : i64
+! FIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! FIR: %[[ele:.*]] = vector.extractelement %[[vx]][%[[u]] : i64] : vector<2xi64>
+! FIR: %[[vy:.*]] = vector.splat %[[ele]] : vector<2xi64>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<2xi64>) -> !fir.vector<2:ui64>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<2:ui64>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<vector<2xi64>>
+! MLIR: %[[idx:.*]] = llvm.mlir.constant(0 : i64) : i64
+! MLIR: %[[c:.*]] = llvm.mlir.constant(2 : i64) : i64
+! MLIR: %[[u:.*]] = llvm.urem %[[idx]], %[[c]]  : i64
+! MLIR: %[[ele:.*]] = llvm.extractelement %[[x]][%[[u]] : i64] : vector<2xi64>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<2xi64>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[ele]], %[[undef]][%[[zero]] : i32] : vector<2xi64>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0] : vector<2xi64>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<2xi64>>
+
+! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
+! LLVMIR: %[[ele:.*]] = extractelement <2 x i64> %[[x]], i64 0
+! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[ele]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
+! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_testu64i64
+
+!----------------
+! vec_splats
+!----------------
+
+! CHECK-LABEL: vec_splats_testi8
+subroutine vec_splats_testi8(x)
+  integer(1) :: x
+  vector(integer(1)) :: y
+  y = vec_splats(x)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<i8>
+! FIR: %[[vy:.*]] = vector.splat %[[x]] : vector<16xi8>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<16xi8>) -> !fir.vector<16:i8>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<16:i8>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<i8>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<16xi8>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[x]], %[[undef]][%[[zero]] : i32] : vector<16xi8>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<16xi8>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<16xi8>>
+
+! LLVMIR: %[[x:.*]] = load i8, ptr %{{[0-9]}}, align 1
+! LLVMIR: %[[ins:.*]] = insertelement <16 x i8> undef, i8 %[[x]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <16 x i8> %[[ins]], <16 x i8> undef, <16 x i32> zeroinitializer
+! LLVMIR: store <16 x i8> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splats_testi8
+
+! CHECK-LABEL: vec_splats_testi16
+subroutine vec_splats_testi16(x)
+  integer(2) :: x
+  vector(integer(2)) :: y
+  y = vec_splats(x)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<i16>
+! FIR: %[[vy:.*]] = vector.splat %[[x]] : vector<8xi16>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<8xi16>) -> !fir.vector<8:i16>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<8:i16>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<i16>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<8xi16>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[x]], %[[undef]][%[[zero]] : i32] : vector<8xi16>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0, 0, 0, 0, 0] : vector<8xi16>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<8xi16>>
+
+! LLVMIR: %[[x:.*]] = load i16, ptr %{{[0-9]}}, align 2
+! LLVMIR: %[[ins:.*]] = insertelement <8 x i16> undef, i16 %[[x]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <8 x i16> %[[ins]], <8 x i16> undef, <8 x i32> zeroinitializer
+! LLVMIR: store <8 x i16> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splats_testi16
+
+! CHECK-LABEL: vec_splats_testi32
+subroutine vec_splats_testi32(x)
+  integer(4) :: x
+  vector(integer(4)) :: y
+  y = vec_splats(x)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<i32>
+! FIR: %[[vy:.*]] = vector.splat %[[x]] : vector<4xi32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:i32>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<i32>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<4xi32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[x]], %[[undef]][%[[zero]] : i32] : vector<4xi32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0] : vector<4xi32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xi32>>
+
+! LLVMIR: %[[x:.*]] = load i32, ptr %{{[0-9]}}, align 4
+! LLVMIR: %[[ins:.*]] = insertelement <4 x i32> undef, i32 %[[x]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <4 x i32> %[[ins]], <4 x i32> undef, <4 x i32> zeroinitializer
+! LLVMIR: store <4 x i32> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splats_testi32
+
+! CHECK-LABEL: vec_splats_testi64
+subroutine vec_splats_testi64(x)
+  integer(8) :: x
+  vector(integer(8)) :: y
+  y = vec_splats(x)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<i64>
+! FIR: %[[vy:.*]] = vector.splat %[[x]] : vector<2xi64>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<2xi64>) -> !fir.vector<2:i64>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<2:i64>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<i64>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<2xi64>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[x]], %[[undef]][%[[zero]] : i32] : vector<2xi64>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0] : vector<2xi64>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<2xi64>>
+
+! LLVMIR: %[[x:.*]] = load i64, ptr %{{[0-9]}}, align 8
+! LLVMIR: %[[ins:.*]] = insertelement <2 x i64> undef, i64 %[[x]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <2 x i64> %[[ins]], <2 x i64> undef, <2 x i32> zeroinitializer
+! LLVMIR: store <2 x i64> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splats_testi64
+
+! CHECK-LABEL: vec_splats_testf32
+subroutine vec_splats_testf32(x)
+  real(4) :: x
+  vector(real(4)) :: y
+  y = vec_splats(x)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<f32>
+! FIR: %[[vy:.*]] = vector.splat %[[x]] : vector<4xf32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xf32>) -> !fir.vector<4:f32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:f32>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<f32>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<4xf32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[x]], %[[undef]][%[[zero]] : i32] : vector<4xf32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0, 0, 0] : vector<4xf32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xf32>>
+
+! LLVMIR: %[[x:.*]] = load float, ptr %{{[0-9]}}, align 4
+! LLVMIR: %[[ins:.*]] = insertelement <4 x float> undef, float %[[x]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <4 x float> %[[ins]], <4 x float> undef, <4 x i32> zeroinitializer
+! LLVMIR: store <4 x float> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splats_testf32
+
+! CHECK-LABEL: vec_splats_testf64
+subroutine vec_splats_testf64(x)
+  real(8) :: x
+  vector(real(8)) :: y
+  y = vec_splats(x)
+! FIR: %[[x:.*]] = fir.load %arg0 : !fir.ref<f64>
+! FIR: %[[vy:.*]] = vector.splat %[[x]] : vector<2xf64>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<2xf64>) -> !fir.vector<2:f64>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<2:f64>>
+
+! MLIR: %[[x:.*]] = llvm.load %arg0 : !llvm.ptr<f64>
+! MLIR: %[[undef:.*]] = llvm.mlir.undef : vector<2xf64>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[x]], %[[undef]][%[[zero]] : i32] : vector<2xf64>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[undef]] [0, 0] : vector<2xf64>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<2xf64>>
+
+! LLVMIR: %[[x:.*]] = load double, ptr %{{[0-9]}}, align 8
+! LLVMIR: %[[ins:.*]] = insertelement <2 x double> undef, double %[[x]], i32 0
+! LLVMIR: %[[y:.*]] = shufflevector <2 x double> %[[ins]], <2 x double> undef, <2 x i32> zeroinitializer
+! LLVMIR: store <2 x double> %[[y]], ptr %{{[0-9]}}, align 16
+end subroutine vec_splats_testf64
+
+! CHECK-LABEL: vec_splat_s32testi8
+subroutine vec_splat_s32testi8()
+  vector(integer(4)) :: y
+  y = vec_splat_s32(7_1)
+! FIR: %[[val:.*]] = arith.constant 7 : i8
+! FIR: %[[cval:.*]] = fir.convert %[[val]] : (i8) -> i32
+! FIR: %[[vy:.*]] = vector.splat %[[cval]] : vector<4xi32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:i32>>
+
+! MLIR: %[[val:.*]] = llvm.mlir.constant(7 : i8) : i8
+! MLIR: %[[cval:.*]] = llvm.sext %[[val]] : i8 to i32
+! MLIR: %[[und:.*]] = llvm.mlir.undef : vector<4xi32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[cval]], %[[und]][%[[zero]] : i32] : vector<4xi32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[und]] [0, 0, 0, 0] : vector<4xi32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xi32>>
+
+! LLVMIR: store <4 x i32> <i32 7, i32 7, i32 7, i32 7>, ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_s32testi8
+
+! CHECK-LABEL: vec_splat_s32testi16
+subroutine vec_splat_s32testi16()
+  vector(integer(4)) :: y
+  y = vec_splat_s32(7_2)
+! FIR: %[[val:.*]] = arith.constant 7 : i16
+! FIR: %[[cval:.*]] = fir.convert %[[val]] : (i16) -> i32
+! FIR: %[[vy:.*]] = vector.splat %[[cval]] : vector<4xi32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:i32>>
+
+! MLIR: %[[val:.*]] = llvm.mlir.constant(7 : i16) : i16
+! MLIR: %[[cval:.*]] = llvm.sext %[[val]] : i16 to i32
+! MLIR: %[[und:.*]] = llvm.mlir.undef : vector<4xi32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[cval]], %[[und]][%[[zero]] : i32] : vector<4xi32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[und]] [0, 0, 0, 0] : vector<4xi32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xi32>>
+
+! LLVMIR: store <4 x i32> <i32 7, i32 7, i32 7, i32 7>, ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_s32testi16
+
+! CHECK-LABEL: vec_splat_s32testi32
+subroutine vec_splat_s32testi32()
+  vector(integer(4)) :: y
+  y = vec_splat_s32(7_4)
+! FIR: %[[val:.*]] = arith.constant 7 : i32
+! FIR: %[[vy:.*]] = vector.splat %[[val]] : vector<4xi32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:i32>>
+
+! MLIR: %[[val:.*]] = llvm.mlir.constant(7 : i32) : i32
+! MLIR: %[[y:.*]] = llvm.mlir.constant(dense<7> : vector<4xi32>) : vector<4xi32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xi32>>
+
+! LLVMIR: store <4 x i32> <i32 7, i32 7, i32 7, i32 7>, ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_s32testi32
+
+! CHECK-LABEL: vec_splat_s32testi64
+subroutine vec_splat_s32testi64()
+  vector(integer(4)) :: y
+  y = vec_splat_s32(7_8)
+! FIR: %[[val:.*]] = arith.constant 7 : i64
+! FIR: %[[cval:.*]] = fir.convert %[[val]] : (i64) -> i32
+! FIR: %[[vy:.*]] = vector.splat %[[cval]] : vector<4xi32>
+! FIR: %[[y:.*]] = fir.convert %[[vy]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! FIR: fir.store %[[y]] to %{{[0-9]}} : !fir.ref<!fir.vector<4:i32>>
+
+! MLIR: %[[val:.*]] = llvm.mlir.constant(7 : i64) : i64
+! MLIR: %[[cval:.*]] = llvm.trunc %[[val]] : i64 to i32
+! MLIR: %[[und:.*]] = llvm.mlir.undef : vector<4xi32>
+! MLIR: %[[zero:.*]] = llvm.mlir.constant(0 : i32) : i32
+! MLIR: %[[ins:.*]] = llvm.insertelement %[[cval]], %[[und]][%[[zero]] : i32] : vector<4xi32>
+! MLIR: %[[y:.*]] = llvm.shufflevector %[[ins]], %[[und]] [0, 0, 0, 0] : vector<4xi32>
+! MLIR: llvm.store %[[y]], %{{[0-9]}} : !llvm.ptr<vector<4xi32>>
+
+! LLVMIR: store <4 x i32> <i32 7, i32 7, i32 7, i32 7>, ptr %{{[0-9]}}, align 16
+end subroutine vec_splat_s32testi64

diff  --git a/flang/test/Semantics/PowerPC/ppc-vector-intrinsics.f90 b/flang/test/Semantics/PowerPC/ppc-vector-intrinsics.f90
index 04244687ec374c..e0df315fa57dff 100644
--- a/flang/test/Semantics/PowerPC/ppc-vector-intrinsics.f90
+++ b/flang/test/Semantics/PowerPC/ppc-vector-intrinsics.f90
@@ -8,17 +8,17 @@ program test
 
 !ERROR: Actual argument #3 must be a constant expression
   r = vec_sld(arg1, arg2, i)
-!ERROR: Argument #3 must be a constant expression in range 0-15
+!ERROR: Argument #3 must be a constant expression in range 0 to 15
   r = vec_sld(arg1, arg2, 17)
 
 !ERROR: Actual argument #3 must be a constant expression
   r = vec_sldw(arg1, arg2, i)
-!ERROR: Argument #3 must be a constant expression in range 0-3
+!ERROR: Argument #3 must be a constant expression in range 0 to 3
   r = vec_sldw(arg1, arg2, 5)
 
 !ERROR: Actual argument #2 must be a constant expression
   rr = vec_ctf(arg1, i)
-! ERROR: Argument #2 must be a constant expression in range 0-31
+! ERROR: Argument #2 must be a constant expression in range 0 to 31
   rr = vec_ctf(arg1, 37)
 end program test
 
@@ -27,6 +27,27 @@ subroutine test_vec_permi()
   integer :: arg3
 !ERROR: Actual argument #3 must be a constant expression
   r = vec_permi(arg1, arg2, arg3)
-! ERROR: Argument #3 must be a constant expression in range 0-3
+! ERROR: Argument #3 must be a constant expression in range 0 to 3
   r = vec_permi(arg1, arg2, 11)
 end
+
+subroutine test_vec_splat()
+  vector(integer(8)) :: arg1_8, r8
+  vector(integer(2)) :: arg1_2, r2
+  integer(2) :: arg2
+!ERROR: Actual argument #2 must be a constant expression
+  r8 = vec_splat(arg1_8, arg2)
+!ERROR: Argument #2 must be a constant expression in range 0 to 1
+  r8 = vec_splat(arg1_8, 3)
+!ERROR: Argument #2 must be a constant expression in range 0 to 7
+  r2 = vec_splat(arg1_2, 11)
+end
+
+subroutine test_vec_splat_s32()
+  integer(4) :: arg1
+  vector(integer(4)) :: r
+!ERROR: Actual argument #1 must be a constant expression
+  r = vec_splat_s32(arg1)
+!ERROR: Argument #1 must be a constant expression in range -16 to 15
+  r = vec_splat_s32(17)
+end

