[flang-commits] [flang] 10124b3 - [flang] Add PowerPC vec_sl, vec_sld, vec_sldw, vec_sll, vec_slo, vec_srl and vec_sro intrinsics

Kelvin Li via flang-commits flang-commits at lists.llvm.org
Wed Jul 12 14:39:45 PDT 2023


Author: Kelvin Li
Date: 2023-07-12T17:39:13-04:00
New Revision: 10124b3e1e194e34cb1ca450dacc68668fee5014

URL: https://github.com/llvm/llvm-project/commit/10124b3e1e194e34cb1ca450dacc68668fee5014
DIFF: https://github.com/llvm/llvm-project/commit/10124b3e1e194e34cb1ca450dacc68668fee5014.diff

LOG: [flang] Add PowerPC vec_sl, vec_sld, vec_sldw, vec_sll, vec_slo, vec_srl and vec_sro intrinsics

Co-authored-by: pscoro

Differential Revision: https://reviews.llvm.org/D154563
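
For reference, a minimal Fortran usage sketch of the new intrinsics (illustrative only, mirroring the lowering and semantics tests added below; not part of the patch):

  subroutine demo(a, b, u4, u1)
    vector(integer(4)) :: a, b, r
    vector(unsigned(4)) :: u4
    vector(unsigned(1)) :: u1
    r = vec_sl(a, u4)     ! element-wise shift left; shift counts are taken modulo the element width (32 here)
    r = vec_sll(a, u1)    ! shift left long, lowered to llvm.ppc.altivec.vsl
    r = vec_slo(a, u1)    ! shift left by octets, lowered to llvm.ppc.altivec.vslo
    r = vec_sld(a, b, 3)  ! third argument must be a constant expression in 0..15
    r = vec_sldw(a, b, 1) ! third argument must be a constant expression in 0..3
  end subroutine demo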

Added: 
    flang/test/Lower/PowerPC/ppc-vec-shift.f90
    flang/test/Semantics/PowerPC/ppc-vector-intrinsics.f90

Modified: 
    flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h
    flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
    flang/lib/Semantics/check-call.cpp
    flang/module/__ppc_intrinsics.f90

Removed: 
    


################################################################################
diff --git a/flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h b/flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h
index b6d03ef0039bcf..ea028f12f65758 100644
--- a/flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h
+++ b/flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h
@@ -17,7 +17,26 @@ namespace fir {
 
 /// Enums used to templatize vector intrinsic function generators. Enum does
 /// not contain every vector intrinsic, only intrinsics that share generators.
-enum class VecOp { Add, And, Anyge, Cmpge, Cmpgt, Cmple, Cmplt, Mul, Sub, Xor };
+enum class VecOp {
+  Add,
+  And,
+  Anyge,
+  Cmpge,
+  Cmpgt,
+  Cmple,
+  Cmplt,
+  Mul,
+  Sl,
+  Sld,
+  Sldw,
+  Sll,
+  Slo,
+  Sr,
+  Srl,
+  Sro,
+  Sub,
+  Xor
+};
 
 // Wrapper struct to encapsulate information for a vector type. Preserves
 // sign of eleTy if eleTy is signed/unsigned integer. Helps with vector type
@@ -96,6 +115,10 @@ struct PPCIntrinsicLibrary : IntrinsicLibrary {
   template <VecOp>
   fir::ExtendedValue genVecAnyCompare(mlir::Type resultType,
                                       llvm::ArrayRef<fir::ExtendedValue> args);
+
+  template <VecOp>
+  fir::ExtendedValue genVecShift(mlir::Type,
+                                 llvm::ArrayRef<fir::ExtendedValue>);
 };
 
 const IntrinsicHandler *findPPCIntrinsicHandler(llvm::StringRef name);

diff --git a/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp b/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
index 9c146a301957aa..ecf8af3e3b0294 100644
--- a/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
+++ b/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
@@ -73,6 +73,46 @@ static constexpr IntrinsicHandler ppcHandlers[]{
          &PI::genVecAddAndMulSubXor<VecOp::Mul>),
      {{{"arg1", asValue}, {"arg2", asValue}}},
      /*isElemental=*/true},
+    {"__ppc_vec_sl",
+     static_cast<IntrinsicLibrary::ExtendedGenerator>(
+         &PI::genVecShift<VecOp::Sl>),
+     {{{"arg1", asValue}, {"arg2", asValue}}},
+     /*isElemental=*/true},
+    {"__ppc_vec_sld",
+     static_cast<IntrinsicLibrary::ExtendedGenerator>(
+         &PI::genVecShift<VecOp::Sld>),
+     {{{"arg1", asValue}, {"arg2", asValue}, {"arg3", asValue}}},
+     /*isElemental=*/true},
+    {"__ppc_vec_sldw",
+     static_cast<IntrinsicLibrary::ExtendedGenerator>(
+         &PI::genVecShift<VecOp::Sldw>),
+     {{{"arg1", asValue}, {"arg2", asValue}, {"arg3", asValue}}},
+     /*isElemental=*/true},
+    {"__ppc_vec_sll",
+     static_cast<IntrinsicLibrary::ExtendedGenerator>(
+         &PI::genVecShift<VecOp::Sll>),
+     {{{"arg1", asValue}, {"arg2", asValue}}},
+     /*isElemental=*/true},
+    {"__ppc_vec_slo",
+     static_cast<IntrinsicLibrary::ExtendedGenerator>(
+         &PI::genVecShift<VecOp::Slo>),
+     {{{"arg1", asValue}, {"arg2", asValue}}},
+     /*isElemental=*/true},
+    {"__ppc_vec_sr",
+     static_cast<IntrinsicLibrary::ExtendedGenerator>(
+         &PI::genVecShift<VecOp::Sr>),
+     {{{"arg1", asValue}, {"arg2", asValue}}},
+     /*isElemental=*/true},
+    {"__ppc_vec_srl",
+     static_cast<IntrinsicLibrary::ExtendedGenerator>(
+         &PI::genVecShift<VecOp::Srl>),
+     {{{"arg1", asValue}, {"arg2", asValue}}},
+     /*isElemental=*/true},
+    {"__ppc_vec_sro",
+     static_cast<IntrinsicLibrary::ExtendedGenerator>(
+         &PI::genVecShift<VecOp::Sro>),
+     {{{"arg1", asValue}, {"arg2", asValue}}},
+     /*isElemental=*/true},
     {"__ppc_vec_sub",
      static_cast<IntrinsicLibrary::ExtendedGenerator>(
          &PI::genVecAddAndMulSubXor<VecOp::Sub>),
@@ -641,4 +681,134 @@ PPCIntrinsicLibrary::genVecCmp(mlir::Type resultType,
   return res;
 }
 
+// VEC_SL, VEC_SLD, VEC_SLDW, VEC_SLL, VEC_SLO, VEC_SR, VEC_SRL, VEC_SRO
+template <VecOp vop>
+fir::ExtendedValue
+PPCIntrinsicLibrary::genVecShift(mlir::Type resultType,
+                                 llvm::ArrayRef<fir::ExtendedValue> args) {
+  auto context{builder.getContext()};
+  auto argBases{getBasesForArgs(args)};
+  auto argTypes{getTypesForArgs(argBases)};
+
+  llvm::SmallVector<VecTypeInfo, 2> vecTyInfoArgs;
+  vecTyInfoArgs.push_back(getVecTypeFromFir(argBases[0]));
+  vecTyInfoArgs.push_back(getVecTypeFromFir(argBases[1]));
+
+  // Convert the first two arguments to MLIR vectors
+  llvm::SmallVector<mlir::Type, 2> mlirTyArgs;
+  mlirTyArgs.push_back(vecTyInfoArgs[0].toMlirVectorType(context));
+  mlirTyArgs.push_back(vecTyInfoArgs[1].toMlirVectorType(context));
+
+  llvm::SmallVector<mlir::Value, 2> mlirVecArgs;
+  mlirVecArgs.push_back(builder.createConvert(loc, mlirTyArgs[0], argBases[0]));
+  mlirVecArgs.push_back(builder.createConvert(loc, mlirTyArgs[1], argBases[1]));
+
+  mlir::Value shftRes{nullptr};
+
+  if (vop == VecOp::Sl || vop == VecOp::Sr) {
+    assert(args.size() == 2);
+    // Construct the mask
+    auto width{
+        mlir::dyn_cast<mlir::IntegerType>(vecTyInfoArgs[1].eleTy).getWidth()};
+    auto vecVal{builder.createIntegerConstant(
+        loc, getConvertedElementType(context, vecTyInfoArgs[0].eleTy), width)};
+    auto mask{
+        builder.create<mlir::vector::BroadcastOp>(loc, mlirTyArgs[1], vecVal)};
+    auto shft{builder.create<mlir::arith::RemUIOp>(loc, mlirVecArgs[1], mask)};
+
+    mlir::Value res{nullptr};
+    if (vop == VecOp::Sr)
+      res = builder.create<mlir::arith::ShRUIOp>(loc, mlirVecArgs[0], shft);
+    else if (vop == VecOp::Sl)
+      res = builder.create<mlir::arith::ShLIOp>(loc, mlirVecArgs[0], shft);
+
+    shftRes = builder.createConvert(loc, argTypes[0], res);
+  } else if (vop == VecOp::Sll || vop == VecOp::Slo || vop == VecOp::Srl ||
+             vop == VecOp::Sro) {
+    assert(args.size() == 2);
+
+    // Bitcast to vector<4xi32>
+    auto bcVecTy{mlir::VectorType::get(4, builder.getIntegerType(32))};
+    if (mlirTyArgs[0] != bcVecTy)
+      mlirVecArgs[0] =
+          builder.create<mlir::vector::BitCastOp>(loc, bcVecTy, mlirVecArgs[0]);
+    if (mlirTyArgs[1] != bcVecTy)
+      mlirVecArgs[1] =
+          builder.create<mlir::vector::BitCastOp>(loc, bcVecTy, mlirVecArgs[1]);
+
+    llvm::StringRef funcName;
+    switch (vop) {
+    case VecOp::Srl:
+      funcName = "llvm.ppc.altivec.vsr";
+      break;
+    case VecOp::Sro:
+      funcName = "llvm.ppc.altivec.vsro";
+      break;
+    case VecOp::Sll:
+      funcName = "llvm.ppc.altivec.vsl";
+      break;
+    case VecOp::Slo:
+      funcName = "llvm.ppc.altivec.vslo";
+      break;
+    default:
+      llvm_unreachable("unknown vector shift operation");
+    }
+    auto funcTy{genFuncType<Ty::IntegerVector<4>, Ty::IntegerVector<4>,
+                            Ty::IntegerVector<4>>(context, builder)};
+    mlir::func::FuncOp funcOp{builder.addNamedFunction(loc, funcName, funcTy)};
+    auto callOp{builder.create<fir::CallOp>(loc, funcOp, mlirVecArgs)};
+
+    // If the result vector type is different from the original type, need
+    // to convert to mlir vector, bitcast and then convert back to fir vector.
+    if (callOp.getResult(0).getType() != argTypes[0]) {
+      auto res = builder.createConvert(loc, bcVecTy, callOp.getResult(0));
+      res = builder.create<mlir::vector::BitCastOp>(loc, mlirTyArgs[0], res);
+      shftRes = builder.createConvert(loc, argTypes[0], res);
+    } else {
+      shftRes = callOp.getResult(0);
+    }
+  } else if (vop == VecOp::Sld || vop == VecOp::Sldw) {
+    assert(args.size() == 3);
+    auto constIntOp =
+        mlir::dyn_cast<mlir::arith::ConstantOp>(argBases[2].getDefiningOp())
+            .getValue()
+            .dyn_cast_or_null<mlir::IntegerAttr>();
+    assert(constIntOp && "expected integer constant argument");
+
+    // Bitcast to vector<16xi8>
+    auto vi8Ty{mlir::VectorType::get(16, builder.getIntegerType(8))};
+    if (mlirTyArgs[0] != vi8Ty) {
+      mlirVecArgs[0] =
+          builder.create<mlir::LLVM::BitcastOp>(loc, vi8Ty, mlirVecArgs[0])
+              .getResult();
+      mlirVecArgs[1] =
+          builder.create<mlir::LLVM::BitcastOp>(loc, vi8Ty, mlirVecArgs[1])
+              .getResult();
+    }
+
+    // Construct the mask for shuffling
+    auto shiftVal{constIntOp.getInt()};
+    if (vop == VecOp::Sldw)
+      shiftVal = shiftVal << 2;
+    shiftVal &= 0xF;
+    llvm::SmallVector<int64_t, 16> mask;
+    for (int i = 16; i < 32; ++i)
+      mask.push_back(i - shiftVal);
+
+    // Shuffle with mask
+    shftRes = builder.create<mlir::vector::ShuffleOp>(loc, mlirVecArgs[1],
+                                                      mlirVecArgs[0], mask);
+
+    // Bitcast to the original type
+    if (shftRes.getType() != mlirTyArgs[0])
+      shftRes =
+          builder.create<mlir::LLVM::BitcastOp>(loc, mlirTyArgs[0], shftRes);
+
+    return builder.createConvert(loc, resultType, shftRes);
+  } else
+    llvm_unreachable("Invalid vector operation for generator");
+
+  return shftRes;
+}
+
 } // namespace fir

diff --git a/flang/lib/Semantics/check-call.cpp b/flang/lib/Semantics/check-call.cpp
index ad6359c356b2cd..260a1827c6b797 100644
--- a/flang/lib/Semantics/check-call.cpp
+++ b/flang/lib/Semantics/check-call.cpp
@@ -1388,6 +1388,12 @@ bool CheckPPCIntrinsic(const Symbol &generic, const Symbol &specific,
     return CheckArgumentIsConstantExprInRange(actuals, 0, 0, 7, messages) &&
         CheckArgumentIsConstantExprInRange(actuals, 1, 0, 15, messages);
   }
+  if (specific.name().ToString().compare(0, 14, "__ppc_vec_sld_") == 0) {
+    return CheckArgumentIsConstantExprInRange(actuals, 2, 0, 15, messages);
+  }
+  if (specific.name().ToString().compare(0, 15, "__ppc_vec_sldw_") == 0) {
+    return CheckArgumentIsConstantExprInRange(actuals, 2, 0, 3, messages);
+  }
   return false;
 }
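
The two new checks above require the third argument of vec_sld and vec_sldw to be a constant expression in range (0..15 and 0..3, respectively). An illustrative sketch of what they accept and reject (not part of the patch; see the added flang/test/Semantics/PowerPC/ppc-vector-intrinsics.f90 for the actual tests):

  vector(integer(4)) :: a, b, r
  integer :: n
  r = vec_sld(a, b, 15)  ! accepted: constant in 0..15
  r = vec_sldw(a, b, 4)  ! rejected: must be a constant in 0..3
  r = vec_sld(a, b, n)   ! rejected: must be a constant expression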
 

diff --git a/flang/module/__ppc_intrinsics.f90 b/flang/module/__ppc_intrinsics.f90
index 3fff2aa7a694b6..e11e2e6e1f65a8 100644
--- a/flang/module/__ppc_intrinsics.f90
+++ b/flang/module/__ppc_intrinsics.f90
@@ -35,11 +35,21 @@ elemental vector(unsigned(VKIND)) function elem_func_vu##VKIND##vi##VKIND##vi##V
     vector(integer(VKIND)), intent(in) :: arg1, arg2; \
   end function ;
 
+! vector(i) function f(vector(i), vector(u))
+#define ELEM_FUNC_VIVIVU_2(VKIND1, VKIND2) \
+  elemental vector(integer(VKIND1)) function elem_func_vi##VKIND1##vi##VKIND1##vu##VKIND2(arg1, arg2); \
+    vector(integer(VKIND1)), intent(in) :: arg1; \
+    vector(unsigned(VKIND2)), intent(in) :: arg2; \
+  end function ;
+#define ELEM_FUNC_VIVIVU(VKIND) ELEM_FUNC_VIVIVU_2(VKIND, VKIND)
+
 ! vector(u) function f(vector(u), vector(u))
-#define ELEM_FUNC_VUVUVU(VKIND) \
-  elemental vector(unsigned(VKIND)) function elem_func_vu##VKIND##vu##VKIND##vu##VKIND(arg1, arg2); \
-    vector(unsigned(VKIND)), intent(in) :: arg1, arg2; \
+#define ELEM_FUNC_VUVUVU_2(VKIND1, VKIND2) \
+  elemental vector(unsigned(VKIND1)) function elem_func_vu##VKIND1##vu##VKIND1##vu##VKIND2(arg1, arg2); \
+    vector(unsigned(VKIND1)), intent(in) :: arg1; \
+    vector(unsigned(VKIND2)), intent(in) :: arg2; \
   end function ;
+#define ELEM_FUNC_VUVUVU(VKIND) ELEM_FUNC_VUVUVU_2(VKIND, VKIND)
 
 ! vector(r) function f(vector(r), vector(r))
 #define ELEM_FUNC_VRVRVR(VKIND) \
@@ -47,6 +57,13 @@ elemental vector(real(VKIND)) function elem_func_vr##VKIND##vr##VKIND##vr##VKIND
     vector(real(VKIND)), intent(in) :: arg1, arg2; \
   end function ;
 
+! vector(r) function f(vector(r), vector(u))
+#define ELEM_FUNC_VRVRVU_2(VKIND1, VKIND2) \
+  elemental vector(real(VKIND1)) function elem_func_vr##VKIND1##vr##VKIND1##vu##VKIND2(arg1, arg2); \
+    vector(real(VKIND1)), intent(in) :: arg1; \
+    vector(unsigned(VKIND2)), intent(in) :: arg2; \
+  end function ;
+
 ! vector(u) function f(vector(r), vector(r))
 #define ELEM_FUNC_VUVRVR(VKIND) \
   elemental vector(unsigned(VKIND)) function elem_func_vu##VKIND##vr##VKIND##vr##VKIND(arg1, arg2); \
@@ -74,6 +91,14 @@ elemental integer(RKIND) function elem_func_i##RKIND##vr##VKIND##vr##VKIND(arg1,
   ELEM_FUNC_VIVIVI(1) ELEM_FUNC_VIVIVI(2) ELEM_FUNC_VIVIVI(4) ELEM_FUNC_VIVIVI(8)
   ELEM_FUNC_VUVIVI(1) ELEM_FUNC_VUVIVI(2) ELEM_FUNC_VUVIVI(4) ELEM_FUNC_VUVIVI(8)
   ELEM_FUNC_VUVUVU(1) ELEM_FUNC_VUVUVU(2) ELEM_FUNC_VUVUVU(4) ELEM_FUNC_VUVUVU(8)
+  ELEM_FUNC_VIVIVU(1) ELEM_FUNC_VIVIVU(2) ELEM_FUNC_VIVIVU(4) ELEM_FUNC_VIVIVU(8)
+  ELEM_FUNC_VIVIVU_2(1,2) ELEM_FUNC_VIVIVU_2(1,4)
+  ELEM_FUNC_VIVIVU_2(2,1) ELEM_FUNC_VIVIVU_2(2,4)
+  ELEM_FUNC_VIVIVU_2(4,1) ELEM_FUNC_VIVIVU_2(4,2)
+  ELEM_FUNC_VUVUVU_2(1,2) ELEM_FUNC_VUVUVU_2(1,4)
+  ELEM_FUNC_VUVUVU_2(2,1) ELEM_FUNC_VUVUVU_2(2,4)
+  ELEM_FUNC_VUVUVU_2(4,1) ELEM_FUNC_VUVUVU_2(4,2)
+  ELEM_FUNC_VRVRVU_2(4,1) ELEM_FUNC_VRVRVU_2(4,2)
   ELEM_FUNC_VRVRVR(4) ELEM_FUNC_VRVRVR(8)
   ELEM_FUNC_VUVRVR(4) ELEM_FUNC_VUVRVR(8)
   ELEM_FUNC_IVIVI(4,1) ELEM_FUNC_IVIVI(4,2) ELEM_FUNC_IVIVI(4,4) ELEM_FUNC_IVIVI(4,8)
@@ -82,9 +107,13 @@ elemental integer(RKIND) function elem_func_i##RKIND##vr##VKIND##vr##VKIND(arg1,
 
 #undef ELEM_FUNC_IVIVI
 #undef ELEM_FUNC_IVUVU
+#undef ELEM_FUNC_VIVIVU_2
+#undef ELEM_FUNC_VUVUVU_2
+#undef ELEM_FUNC_VRVRVU_2
 #undef ELEM_FUNC_IVRVR
 #undef ELEM_FUNC_VUVRVR
 #undef ELEM_FUNC_VRVRVR
+#undef ELEM_FUNC_VIVIVU
 #undef ELEM_FUNC_VUVUVU
 #undef ELEM_FUNC_VUVIVI
 #undef ELEM_FUNC_VIVIVI
@@ -96,8 +125,42 @@ elemental vector(real(VKIND)) function elem_func_vr##VKIND##vr##VKIND##vr##VKIND
     vector(real(VKIND)), intent(in) :: arg1, arg2, arg3; \
   end function ;
 
-  ELEM_FUNC_VRVRVRVR(4) ELEM_FUNC_VRVRVRVR(8)
+! vector(i) function f(vector(i), vector(i), i)
+#define ELEM_FUNC_VIVIVII(VKIND, IKIND) \
+  elemental vector(integer(VKIND)) function elem_func_vi##VKIND##vi##VKIND##vi##VKIND##i##IKIND(arg1, arg2, arg3); \
+    vector(integer(VKIND)), intent(in) :: arg1, arg2; \
+    integer(IKIND), intent(in) :: arg3; \
+  end function ;
 
+! vector(u) function f(vector(u), vector(u), i)
+#define ELEM_FUNC_VUVUVUI(VKIND, IKIND) \
+  elemental vector(unsigned(VKIND)) function elem_func_vu##VKIND##vu##VKIND##vu##VKIND##i##IKIND(arg1, arg2, arg3); \
+    vector(unsigned(VKIND)), intent(in) :: arg1, arg2; \
+    integer(IKIND), intent(in) :: arg3; \
+  end function ;
+
+! vector(r) function f(vector(r), vector(r), i)
+#define ELEM_FUNC_VRVRVRI(VKIND, IKIND) \
+  elemental vector(real(VKIND)) function elem_func_vr##VKIND##vr##VKIND##vr##VKIND##i##IKIND(arg1, arg2, arg3); \
+    vector(real(VKIND)), intent(in) :: arg1, arg2; \
+    integer(IKIND), intent(in) :: arg3; \
+  end function ;
+
+  ELEM_FUNC_VRVRVRVR(4) ELEM_FUNC_VRVRVRVR(8)
+  ELEM_FUNC_VIVIVII(1,1) ELEM_FUNC_VIVIVII(1,2) ELEM_FUNC_VIVIVII(1,4) ELEM_FUNC_VIVIVII(1,8)
+  ELEM_FUNC_VIVIVII(2,1) ELEM_FUNC_VIVIVII(2,2) ELEM_FUNC_VIVIVII(2,4) ELEM_FUNC_VIVIVII(2,8)
+  ELEM_FUNC_VIVIVII(4,1) ELEM_FUNC_VIVIVII(4,2) ELEM_FUNC_VIVIVII(4,4) ELEM_FUNC_VIVIVII(4,8)
+  ELEM_FUNC_VIVIVII(8,1) ELEM_FUNC_VIVIVII(8,2) ELEM_FUNC_VIVIVII(8,4) ELEM_FUNC_VIVIVII(8,8)
+  ELEM_FUNC_VUVUVUI(1,1) ELEM_FUNC_VUVUVUI(1,2) ELEM_FUNC_VUVUVUI(1,4) ELEM_FUNC_VUVUVUI(1,8)
+  ELEM_FUNC_VUVUVUI(2,1) ELEM_FUNC_VUVUVUI(2,2) ELEM_FUNC_VUVUVUI(2,4) ELEM_FUNC_VUVUVUI(2,8)
+  ELEM_FUNC_VUVUVUI(4,1) ELEM_FUNC_VUVUVUI(4,2) ELEM_FUNC_VUVUVUI(4,4) ELEM_FUNC_VUVUVUI(4,8)
+  ELEM_FUNC_VUVUVUI(8,1) ELEM_FUNC_VUVUVUI(8,2) ELEM_FUNC_VUVUVUI(8,4) ELEM_FUNC_VUVUVUI(8,8)
+  ELEM_FUNC_VRVRVRI(4,1) ELEM_FUNC_VRVRVRI(4,2) ELEM_FUNC_VRVRVRI(4,4) ELEM_FUNC_VRVRVRI(4,8)
+  ELEM_FUNC_VRVRVRI(8,1) ELEM_FUNC_VRVRVRI(8,2) ELEM_FUNC_VRVRVRI(8,4) ELEM_FUNC_VRVRVRI(8,8)
+
+#undef ELEM_FUNC_VIVIVII
+#undef ELEM_FUNC_VUVUVUI
+#undef ELEM_FUNC_VRVRVRI
 #undef ELEM_FUNC_VRVRVRVR
 
   end interface
@@ -262,20 +325,30 @@ end function func_r8r8i
 !---------------------------------
 #define VI_VI_VI(NAME, VKIND) __ppc_##NAME##_vi##VKIND##vi##VKIND##vi##VKIND
 #define VU_VI_VI(NAME, VKIND) __ppc_##NAME##_vu##VKIND##vi##VKIND##vi##VKIND
-#define VU_VU_VU(NAME, VKIND) __ppc_##NAME##_vu##VKIND##vu##VKIND##vu##VKIND
+#define VU_VU_VU_2(NAME, VKIND1, VKIND2) __ppc_##NAME##_vu##VKIND1##vu##VKIND1##vu##VKIND2
+#define VU_VU_VU(NAME, VKIND) VU_VU_VU_2(NAME, VKIND, VKIND)
+#define VI_VI_VU_2(NAME, VKIND1, VKIND2) __ppc_##NAME##_vi##VKIND1##vi##VKIND1##vu##VKIND2
+#define VI_VI_VU(NAME, VKIND) VI_VI_VU_2(NAME, VKIND, VKIND)
 #define VR_VR_VR(NAME, VKIND) __ppc_##NAME##_vr##VKIND##vr##VKIND##vr##VKIND
+#define VR_VR_VU_2(NAME, VKIND1, VKIND2) __ppc_##NAME##_vr##VKIND1##vr##VKIND1##vu##VKIND2
 #define VU_VR_VR(NAME, VKIND) __ppc_##NAME##_vu##VKIND##vr##VKIND##vr##VKIND
 
 #define VEC_VI_VI_VI(NAME, VKIND) \
   procedure(elem_func_vi##VKIND##vi##VKIND##vi##VKIND) :: VI_VI_VI(NAME, VKIND);
 #define VEC_VU_VI_VI(NAME, VKIND) \
   procedure(elem_func_vu##VKIND##vi##VKIND##vi##VKIND) :: VU_VI_VI(NAME, VKIND);
-#define VEC_VU_VU_VU(NAME, VKIND) \
-  procedure(elem_func_vu##VKIND##vu##VKIND##vu##VKIND) :: VU_VU_VU(NAME, VKIND);
+#define VEC_VU_VU_VU_2(NAME, VKIND1, VKIND2) \
+  procedure(elem_func_vu##VKIND1##vu##VKIND1##vu##VKIND2) :: VU_VU_VU_2(NAME, VKIND1, VKIND2);
+#define VEC_VU_VU_VU(NAME, VKIND) VEC_VU_VU_VU_2(NAME, VKIND, VKIND)
+#define VEC_VI_VI_VU_2(NAME, VKIND1, VKIND2) \
+  procedure(elem_func_vi##VKIND1##vi##VKIND1##vu##VKIND2) :: VI_VI_VU_2(NAME, VKIND1, VKIND2);
+#define VEC_VI_VI_VU(NAME, VKIND) VEC_VI_VI_VU_2(NAME, VKIND, VKIND)
 #define VEC_VR_VR_VR(NAME, VKIND) \
   procedure(elem_func_vr##VKIND##vr##VKIND##vr##VKIND) :: VR_VR_VR(NAME, VKIND);
 #define VEC_VU_VR_VR(NAME, VKIND) \
   procedure(elem_func_vu##VKIND##vr##VKIND##vr##VKIND) :: VU_VR_VR(NAME, VKIND);
+#define VEC_VR_VR_VU(NAME, VKIND1, VKIND2) \
+  procedure(elem_func_vr##VKIND1##vr##VKIND1##vu##VKIND2) :: VR_VR_VU_2(NAME, VKIND1, VKIND2);
 
 ! vec_add
   VEC_VI_VI_VI(vec_add,1) VEC_VI_VI_VI(vec_add,2) VEC_VI_VI_VI(vec_add,4) VEC_VI_VI_VI(vec_add,8)
@@ -387,6 +460,88 @@ end function func_r8r8i
   end interface vec_sub
   public :: vec_sub
 
+! vec_sl
+  VEC_VI_VI_VU(vec_sl,1) VEC_VI_VI_VU(vec_sl,2) VEC_VI_VI_VU(vec_sl,4) VEC_VI_VI_VU(vec_sl,8)
+  VEC_VU_VU_VU(vec_sl,1) VEC_VU_VU_VU(vec_sl,2) VEC_VU_VU_VU(vec_sl,4) VEC_VU_VU_VU(vec_sl,8)
+  interface vec_sl
+    procedure :: VI_VI_VU(vec_sl,1), VI_VI_VU(vec_sl,2), VI_VI_VU(vec_sl,4), VI_VI_VU(vec_sl,8)
+    procedure :: VU_VU_VU(vec_sl,1), VU_VU_VU(vec_sl,2), VU_VU_VU(vec_sl,4), VU_VU_VU(vec_sl,8)
+  end interface vec_sl
+  public :: vec_sl
+
+! vec_sll
+  VEC_VI_VI_VU_2(vec_sll,1,1) VEC_VI_VI_VU_2(vec_sll,2,1) VEC_VI_VI_VU_2(vec_sll,4,1)
+  VEC_VI_VI_VU_2(vec_sll,1,2) VEC_VI_VI_VU_2(vec_sll,2,2) VEC_VI_VI_VU_2(vec_sll,4,2)
+  VEC_VI_VI_VU_2(vec_sll,1,4) VEC_VI_VI_VU_2(vec_sll,2,4) VEC_VI_VI_VU_2(vec_sll,4,4)
+  VEC_VU_VU_VU_2(vec_sll,1,1) VEC_VU_VU_VU_2(vec_sll,2,1) VEC_VU_VU_VU_2(vec_sll,4,1)
+  VEC_VU_VU_VU_2(vec_sll,1,2) VEC_VU_VU_VU_2(vec_sll,2,2) VEC_VU_VU_VU_2(vec_sll,4,2)
+  VEC_VU_VU_VU_2(vec_sll,1,4) VEC_VU_VU_VU_2(vec_sll,2,4) VEC_VU_VU_VU_2(vec_sll,4,4)
+  interface vec_sll
+    procedure :: VI_VI_VU_2(vec_sll,1,1), VI_VI_VU_2(vec_sll,2,1), VI_VI_VU_2(vec_sll,4,1)
+    procedure :: VI_VI_VU_2(vec_sll,1,2), VI_VI_VU_2(vec_sll,2,2), VI_VI_VU_2(vec_sll,4,2)
+    procedure :: VI_VI_VU_2(vec_sll,1,4), VI_VI_VU_2(vec_sll,2,4), VI_VI_VU_2(vec_sll,4,4)
+    procedure :: VU_VU_VU_2(vec_sll,1,1), VU_VU_VU_2(vec_sll,2,1), VU_VU_VU_2(vec_sll,4,1)
+    procedure :: VU_VU_VU_2(vec_sll,1,2), VU_VU_VU_2(vec_sll,2,2), VU_VU_VU_2(vec_sll,4,2)
+    procedure :: VU_VU_VU_2(vec_sll,1,4), VU_VU_VU_2(vec_sll,2,4), VU_VU_VU_2(vec_sll,4,4)
+  end interface vec_sll
+  public :: vec_sll
+
+! vec_slo
+  VEC_VI_VI_VU_2(vec_slo,1,1) VEC_VI_VI_VU_2(vec_slo,2,1) VEC_VI_VI_VU_2(vec_slo,4,1)
+  VEC_VI_VI_VU_2(vec_slo,1,2) VEC_VI_VI_VU_2(vec_slo,2,2) VEC_VI_VI_VU_2(vec_slo,4,2)
+  VEC_VU_VU_VU_2(vec_slo,1,1) VEC_VU_VU_VU_2(vec_slo,2,1) VEC_VU_VU_VU_2(vec_slo,4,1)
+  VEC_VU_VU_VU_2(vec_slo,1,2) VEC_VU_VU_VU_2(vec_slo,2,2) VEC_VU_VU_VU_2(vec_slo,4,2)
+  VEC_VR_VR_VU(vec_slo,4,1) VEC_VR_VR_VU(vec_slo,4,2)
+  interface vec_slo
+    procedure :: VI_VI_VU_2(vec_slo,1,1), VI_VI_VU_2(vec_slo,2,1), VI_VI_VU_2(vec_slo,4,1)
+    procedure :: VI_VI_VU_2(vec_slo,1,2), VI_VI_VU_2(vec_slo,2,2), VI_VI_VU_2(vec_slo,4,2)
+    procedure :: VU_VU_VU_2(vec_slo,1,1), VU_VU_VU_2(vec_slo,2,1), VU_VU_VU_2(vec_slo,4,1)
+    procedure :: VU_VU_VU_2(vec_slo,1,2), VU_VU_VU_2(vec_slo,2,2), VU_VU_VU_2(vec_slo,4,2)
+    procedure :: VR_VR_VU_2(vec_slo,4,1), VR_VR_VU_2(vec_slo,4,2)
+  end interface vec_slo
+  public :: vec_slo
+
+! vec_sr
+  VEC_VI_VI_VU(vec_sr,1) VEC_VI_VI_VU(vec_sr,2) VEC_VI_VI_VU(vec_sr,4) VEC_VI_VI_VU(vec_sr,8)
+  VEC_VU_VU_VU(vec_sr,1) VEC_VU_VU_VU(vec_sr,2) VEC_VU_VU_VU(vec_sr,4) VEC_VU_VU_VU(vec_sr,8)
+  interface vec_sr
+    procedure :: VI_VI_VU(vec_sr,1), VI_VI_VU(vec_sr,2), VI_VI_VU(vec_sr,4), VI_VI_VU(vec_sr,8)
+    procedure :: VU_VU_VU(vec_sr,1), VU_VU_VU(vec_sr,2), VU_VU_VU(vec_sr,4), VU_VU_VU(vec_sr,8)
+  end interface vec_sr
+  public :: vec_sr
+
+! vec_srl
+  VEC_VI_VI_VU_2(vec_srl,1,1) VEC_VI_VI_VU_2(vec_srl,2,1) VEC_VI_VI_VU_2(vec_srl,4,1)
+  VEC_VI_VI_VU_2(vec_srl,1,2) VEC_VI_VI_VU_2(vec_srl,2,2) VEC_VI_VI_VU_2(vec_srl,4,2)
+  VEC_VI_VI_VU_2(vec_srl,1,4) VEC_VI_VI_VU_2(vec_srl,2,4) VEC_VI_VI_VU_2(vec_srl,4,4)
+  VEC_VU_VU_VU_2(vec_srl,1,1) VEC_VU_VU_VU_2(vec_srl,2,1) VEC_VU_VU_VU_2(vec_srl,4,1)
+  VEC_VU_VU_VU_2(vec_srl,1,2) VEC_VU_VU_VU_2(vec_srl,2,2) VEC_VU_VU_VU_2(vec_srl,4,2)
+  VEC_VU_VU_VU_2(vec_srl,1,4) VEC_VU_VU_VU_2(vec_srl,2,4) VEC_VU_VU_VU_2(vec_srl,4,4)
+  interface vec_srl
+    procedure :: VI_VI_VU_2(vec_srl,1,1), VI_VI_VU_2(vec_srl,2,1), VI_VI_VU_2(vec_srl,4,1)
+    procedure :: VI_VI_VU_2(vec_srl,1,2), VI_VI_VU_2(vec_srl,2,2), VI_VI_VU_2(vec_srl,4,2)
+    procedure :: VI_VI_VU_2(vec_srl,1,4), VI_VI_VU_2(vec_srl,2,4), VI_VI_VU_2(vec_srl,4,4)
+    procedure :: VU_VU_VU_2(vec_srl,1,1), VU_VU_VU_2(vec_srl,2,1), VU_VU_VU_2(vec_srl,4,1)
+    procedure :: VU_VU_VU_2(vec_srl,1,2), VU_VU_VU_2(vec_srl,2,2), VU_VU_VU_2(vec_srl,4,2)
+    procedure :: VU_VU_VU_2(vec_srl,1,4), VU_VU_VU_2(vec_srl,2,4), VU_VU_VU_2(vec_srl,4,4)
+  end interface vec_srl
+  public :: vec_srl
+
+! vec_sro
+  VEC_VI_VI_VU_2(vec_sro,1,1) VEC_VI_VI_VU_2(vec_sro,2,1) VEC_VI_VI_VU_2(vec_sro,4,1)
+  VEC_VI_VI_VU_2(vec_sro,1,2) VEC_VI_VI_VU_2(vec_sro,2,2) VEC_VI_VI_VU_2(vec_sro,4,2)
+  VEC_VU_VU_VU_2(vec_sro,1,1) VEC_VU_VU_VU_2(vec_sro,2,1) VEC_VU_VU_VU_2(vec_sro,4,1)
+  VEC_VU_VU_VU_2(vec_sro,1,2) VEC_VU_VU_VU_2(vec_sro,2,2) VEC_VU_VU_VU_2(vec_sro,4,2)
+  VEC_VR_VR_VU(vec_sro,4,1) VEC_VR_VR_VU(vec_sro,4,2)
+  interface vec_sro
+    procedure :: VI_VI_VU_2(vec_sro,1,1), VI_VI_VU_2(vec_sro,2,1), VI_VI_VU_2(vec_sro,4,1)
+    procedure :: VI_VI_VU_2(vec_sro,1,2), VI_VI_VU_2(vec_sro,2,2), VI_VI_VU_2(vec_sro,4,2)
+    procedure :: VU_VU_VU_2(vec_sro,1,1), VU_VU_VU_2(vec_sro,2,1), VU_VU_VU_2(vec_sro,4,1)
+    procedure :: VU_VU_VU_2(vec_sro,1,2), VU_VU_VU_2(vec_sro,2,2), VU_VU_VU_2(vec_sro,4,2)
+    procedure :: VR_VR_VU_2(vec_sro,4,1), VR_VR_VU_2(vec_sro,4,2)
+  end interface vec_sro
+  public :: vec_sro
+
 ! vec_xor
   VEC_VI_VI_VI(vec_xor,1) VEC_VI_VI_VI(vec_xor,2) VEC_VI_VI_VI(vec_xor,4) VEC_VI_VI_VI(vec_xor,8)
   VEC_VU_VU_VU(vec_xor,1) VEC_VU_VU_VU(vec_xor,2) VEC_VU_VU_VU(vec_xor,4) VEC_VU_VU_VU(vec_xor,8)
@@ -401,11 +556,19 @@ end function func_r8r8i
 #undef VEC_VU_VR_VR
 #undef VEC_VR_VR_VR
 #undef VEC_VU_VU_VU
+#undef VEC_VR_VR_VU
+#undef VEC_VU_VU_VU_2
 #undef VEC_VI_VI_VI
 #undef VEC_VU_VI_VI
+#undef VEC_VI_VI_VU
+#undef VEC_VI_VI_VU_2
 #undef VU_VR_VR
 #undef VR_VR_VR
 #undef VU_VU_VU
+#undef VU_VU_VU_2
+#undef VI_VI_VU_2
+#undef VI_VI_VU
+#undef VR_VR_VU_2
 #undef VU_VI_VI
 #undef VI_VI_VI
 
@@ -466,4 +629,69 @@ end function func_r8r8i
 #undef I_VU_VU
 #undef I_VI_VI
 
+!------------------------------------------
+! vector function(vector, vector, integer)
+!------------------------------------------
+#define VI_VI_VI_I(NAME, VKIND, IKIND) __ppc_##NAME##_vi##VKIND##vi##VKIND##vi##VKIND##i##IKIND
+#define VU_VU_VU_I(NAME, VKIND, IKIND) __ppc_##NAME##_vu##VKIND##vu##VKIND##vu##VKIND##i##IKIND
+#define VR_VR_VR_I(NAME, VKIND, IKIND) __ppc_##NAME##_vr##VKIND##vr##VKIND##vr##VKIND##i##IKIND
+
+#define VEC_VI_VI_VI_I(NAME, VKIND, IKIND) \
+  procedure(elem_func_vi##VKIND##vi##VKIND##vi##VKIND##i##IKIND) :: VI_VI_VI_I(NAME, VKIND, IKIND);
+#define VEC_VU_VU_VU_I(NAME, VKIND, IKIND) \
+  procedure(elem_func_vu##VKIND##vu##VKIND##vu##VKIND##i##IKIND) :: VU_VU_VU_I(NAME, VKIND, IKIND);
+#define VEC_VR_VR_VR_I(NAME, VKIND, IKIND) \
+  procedure(elem_func_vr##VKIND##vr##VKIND##vr##VKIND##i##IKIND) :: VR_VR_VR_I(NAME, VKIND, IKIND);
+
+! vec_sld
+  VEC_VI_VI_VI_I(vec_sld,1,1) VEC_VI_VI_VI_I(vec_sld,1,2) VEC_VI_VI_VI_I(vec_sld,1,4) VEC_VI_VI_VI_I(vec_sld,1,8)
+  VEC_VI_VI_VI_I(vec_sld,2,1) VEC_VI_VI_VI_I(vec_sld,2,2) VEC_VI_VI_VI_I(vec_sld,2,4) VEC_VI_VI_VI_I(vec_sld,2,8)
+  VEC_VI_VI_VI_I(vec_sld,4,1) VEC_VI_VI_VI_I(vec_sld,4,2) VEC_VI_VI_VI_I(vec_sld,4,4) VEC_VI_VI_VI_I(vec_sld,4,8)
+  VEC_VU_VU_VU_I(vec_sld,1,1) VEC_VU_VU_VU_I(vec_sld,1,2) VEC_VU_VU_VU_I(vec_sld,1,4) VEC_VU_VU_VU_I(vec_sld,1,8)
+  VEC_VU_VU_VU_I(vec_sld,2,1) VEC_VU_VU_VU_I(vec_sld,2,2) VEC_VU_VU_VU_I(vec_sld,2,4) VEC_VU_VU_VU_I(vec_sld,2,8)
+  VEC_VU_VU_VU_I(vec_sld,4,1) VEC_VU_VU_VU_I(vec_sld,4,2) VEC_VU_VU_VU_I(vec_sld,4,4) VEC_VU_VU_VU_I(vec_sld,4,8)
+  VEC_VR_VR_VR_I(vec_sld,4,1) VEC_VR_VR_VR_I(vec_sld,4,2) VEC_VR_VR_VR_I(vec_sld,4,4) VEC_VR_VR_VR_I(vec_sld,4,8)
+  interface vec_sld
+    procedure :: VI_VI_VI_I(vec_sld,1,1), VI_VI_VI_I(vec_sld,1,2), VI_VI_VI_I(vec_sld,1,4), VI_VI_VI_I(vec_sld,1,8)
+    procedure :: VI_VI_VI_I(vec_sld,2,1), VI_VI_VI_I(vec_sld,2,2), VI_VI_VI_I(vec_sld,2,4), VI_VI_VI_I(vec_sld,2,8)
+    procedure :: VI_VI_VI_I(vec_sld,4,1), VI_VI_VI_I(vec_sld,4,2), VI_VI_VI_I(vec_sld,4,4), VI_VI_VI_I(vec_sld,4,8)
+    procedure :: VU_VU_VU_I(vec_sld,1,1), VU_VU_VU_I(vec_sld,1,2), VU_VU_VU_I(vec_sld,1,4), VU_VU_VU_I(vec_sld,1,8)
+    procedure :: VU_VU_VU_I(vec_sld,2,1), VU_VU_VU_I(vec_sld,2,2), VU_VU_VU_I(vec_sld,2,4), VU_VU_VU_I(vec_sld,2,8)
+    procedure :: VU_VU_VU_I(vec_sld,4,1), VU_VU_VU_I(vec_sld,4,2), VU_VU_VU_I(vec_sld,4,4), VU_VU_VU_I(vec_sld,4,8)
+    procedure :: VR_VR_VR_I(vec_sld,4,1), VR_VR_VR_I(vec_sld,4,2), VR_VR_VR_I(vec_sld,4,4), VR_VR_VR_I(vec_sld,4,8)
+  end interface vec_sld
+  public :: vec_sld
+
+! vec_sldw
+  VEC_VI_VI_VI_I(vec_sldw,1,1) VEC_VI_VI_VI_I(vec_sldw,1,2) VEC_VI_VI_VI_I(vec_sldw,1,4) VEC_VI_VI_VI_I(vec_sldw,1,8)
+  VEC_VI_VI_VI_I(vec_sldw,2,1) VEC_VI_VI_VI_I(vec_sldw,2,2) VEC_VI_VI_VI_I(vec_sldw,2,4) VEC_VI_VI_VI_I(vec_sldw,2,8)
+  VEC_VI_VI_VI_I(vec_sldw,4,1) VEC_VI_VI_VI_I(vec_sldw,4,2) VEC_VI_VI_VI_I(vec_sldw,4,4) VEC_VI_VI_VI_I(vec_sldw,4,8)
+  VEC_VI_VI_VI_I(vec_sldw,8,1) VEC_VI_VI_VI_I(vec_sldw,8,2) VEC_VI_VI_VI_I(vec_sldw,8,4) VEC_VI_VI_VI_I(vec_sldw,8,8)
+  VEC_VU_VU_VU_I(vec_sldw,1,1) VEC_VU_VU_VU_I(vec_sldw,1,2) VEC_VU_VU_VU_I(vec_sldw,1,4) VEC_VU_VU_VU_I(vec_sldw,1,8)
+  VEC_VU_VU_VU_I(vec_sldw,2,1) VEC_VU_VU_VU_I(vec_sldw,2,2) VEC_VU_VU_VU_I(vec_sldw,2,4) VEC_VU_VU_VU_I(vec_sldw,2,8)
+  VEC_VU_VU_VU_I(vec_sldw,4,1) VEC_VU_VU_VU_I(vec_sldw,4,2) VEC_VU_VU_VU_I(vec_sldw,4,4) VEC_VU_VU_VU_I(vec_sldw,4,8)
+  VEC_VU_VU_VU_I(vec_sldw,8,1) VEC_VU_VU_VU_I(vec_sldw,8,2) VEC_VU_VU_VU_I(vec_sldw,8,4) VEC_VU_VU_VU_I(vec_sldw,8,8)
+  VEC_VR_VR_VR_I(vec_sldw,4,1) VEC_VR_VR_VR_I(vec_sldw,4,2) VEC_VR_VR_VR_I(vec_sldw,4,4) VEC_VR_VR_VR_I(vec_sldw,4,8)
+  VEC_VR_VR_VR_I(vec_sldw,8,1) VEC_VR_VR_VR_I(vec_sldw,8,2) VEC_VR_VR_VR_I(vec_sldw,8,4) VEC_VR_VR_VR_I(vec_sldw,8,8)
+  interface vec_sldw
+    procedure :: VI_VI_VI_I(vec_sldw,1,1), VI_VI_VI_I(vec_sldw,1,2), VI_VI_VI_I(vec_sldw,1,4), VI_VI_VI_I(vec_sldw,1,8)
+    procedure :: VI_VI_VI_I(vec_sldw,2,1), VI_VI_VI_I(vec_sldw,2,2), VI_VI_VI_I(vec_sldw,2,4), VI_VI_VI_I(vec_sldw,2,8)
+    procedure :: VI_VI_VI_I(vec_sldw,4,1), VI_VI_VI_I(vec_sldw,4,2), VI_VI_VI_I(vec_sldw,4,4), VI_VI_VI_I(vec_sldw,4,8)
+    procedure :: VI_VI_VI_I(vec_sldw,8,1), VI_VI_VI_I(vec_sldw,8,2), VI_VI_VI_I(vec_sldw,8,4), VI_VI_VI_I(vec_sldw,8,8)
+    procedure :: VU_VU_VU_I(vec_sldw,1,1), VU_VU_VU_I(vec_sldw,1,2), VU_VU_VU_I(vec_sldw,1,4), VU_VU_VU_I(vec_sldw,1,8)
+    procedure :: VU_VU_VU_I(vec_sldw,2,1), VU_VU_VU_I(vec_sldw,2,2), VU_VU_VU_I(vec_sldw,2,4), VU_VU_VU_I(vec_sldw,2,8)
+    procedure :: VU_VU_VU_I(vec_sldw,4,1), VU_VU_VU_I(vec_sldw,4,2), VU_VU_VU_I(vec_sldw,4,4), VU_VU_VU_I(vec_sldw,4,8)
+    procedure :: VU_VU_VU_I(vec_sldw,8,1), VU_VU_VU_I(vec_sldw,8,2), VU_VU_VU_I(vec_sldw,8,4), VU_VU_VU_I(vec_sldw,8,8)
+    procedure :: VR_VR_VR_I(vec_sldw,4,1), VR_VR_VR_I(vec_sldw,4,2), VR_VR_VR_I(vec_sldw,4,4), VR_VR_VR_I(vec_sldw,4,8)
+    procedure :: VR_VR_VR_I(vec_sldw,8,1), VR_VR_VR_I(vec_sldw,8,2), VR_VR_VR_I(vec_sldw,8,4), VR_VR_VR_I(vec_sldw,8,8)
+  end interface vec_sldw
+  public :: vec_sldw
+
+#undef VEC_VR_VR_VR_I
+#undef VEC_VU_VU_VU_I
+#undef VEC_VI_VI_VI_I
+#undef VR_VR_VR_I
+#undef VU_VU_VU_I
+#undef VI_VI_VI_I
+
 end module __ppc_intrinsics

diff --git a/flang/test/Lower/PowerPC/ppc-vec-shift.f90 b/flang/test/Lower/PowerPC/ppc-vec-shift.f90
new file mode 100644
index 00000000000000..0d6aacc01b585b
--- /dev/null
+++ b/flang/test/Lower/PowerPC/ppc-vec-shift.f90
@@ -0,0 +1,4466 @@
+! RUN: bbc -emit-fir %s -o - | FileCheck --check-prefixes="CHECK-FIR" %s
+! RUN: %flang_fc1 -emit-fir %s -o - | fir-opt --fir-to-llvm-ir | FileCheck --check-prefixes="CHECK-LLVMIR" %s
+! RUN: %flang_fc1 -emit-llvm %s -o - | FileCheck --check-prefixes="CHECK" %s
+! REQUIRES: target=powerpc{{.*}}
+
+!----------------------
+! vec_sl
+!----------------------
+
+! CHECK-LABEL: vec_sl_i1
+subroutine vec_sl_i1(arg1, arg2)
+  vector(integer(1)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %c8_i8 = arith.constant 8 : i8
+! CHECK-FIR: %[[cv:.*]] = vector.broadcast %c8_i8 : i8 to vector<16xi8>
+! CHECK-FIR: %[[msk:.*]] = arith.remui %[[varg2]], %[[cv]] : vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = arith.shli %[[varg1]], %[[msk]] : vector<16xi8>
+! CHECK-FIR: %{{[0-9]}} = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:i8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[cv:.*]] = llvm.mlir.constant(dense<8> : vector<16xi8>) : vector<16xi8>
+! CHECK-LLVMIR: %[[msk:.*]] = llvm.urem %[[arg2]], %[[cv]]  : vector<16xi8>
+! CHECK-LLVMIR: %{{[0-9]}} = llvm.shl %[[arg1]], %[[msk]]  : vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[msk:.*]] = urem <16 x i8> %[[arg2]], <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+! CHECK: %7 = shl <16 x i8> %[[arg1]], %[[msk]]
+end subroutine vec_sl_i1
+
+! CHECK-LABEL: vec_sl_i2
+subroutine vec_sl_i2(arg1, arg2)
+  vector(integer(2)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %c16_i16 = arith.constant 16 : i16
+! CHECK-FIR: %[[cv:.*]] = vector.broadcast %c16_i16 : i16 to vector<8xi16>
+! CHECK-FIR: %[[msk:.*]] = arith.remui %[[varg2]], %[[cv]] : vector<8xi16>
+! CHECK-FIR: %[[r:.*]] = arith.shli %[[varg1]], %[[msk]] : vector<8xi16>
+! CHECK-FIR: %{{[0-9]}} = fir.convert %[[r]] : (vector<8xi16>) -> !fir.vector<8:i16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[cv:.*]] = llvm.mlir.constant(dense<16> : vector<8xi16>) : vector<8xi16>
+! CHECK-LLVMIR: %[[msk:.*]] = llvm.urem %[[arg2]], %[[cv]]  : vector<8xi16>
+! CHECK-LLVMIR: %{{[0-9]}} = llvm.shl %[[arg1]], %[[msk]]  : vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[msk:.*]] = urem <8 x i16> %[[arg2]], <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+! CHECK: %7 = shl <8 x i16> %[[arg1]], %[[msk]]
+end subroutine vec_sl_i2
+
+! CHECK-LABEL: vec_sl_i4
+subroutine vec_sl_i4(arg1, arg2)
+  vector(integer(4)) :: arg1, r
+  vector(unsigned(4)) :: arg2
+  r = vec_sl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %c32_i32 = arith.constant 32 : i32
+! CHECK-FIR: %[[cv:.*]] = vector.broadcast %c32_i32 : i32 to vector<4xi32>
+! CHECK-FIR: %[[msk:.*]] = arith.remui %[[varg2]], %[[cv]] : vector<4xi32>
+! CHECK-FIR: %[[r:.*]] = arith.shli %[[varg1]], %[[msk]] : vector<4xi32>
+! CHECK-FIR: %{{[0-9]}} = fir.convert %[[r]] : (vector<4xi32>) -> !fir.vector<4:i32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[cv:.*]] = llvm.mlir.constant(dense<32> : vector<4xi32>) : vector<4xi32>
+! CHECK-LLVMIR: %[[msk:.*]] = llvm.urem %[[arg2]], %[[cv]]  : vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]}} = llvm.shl %[[arg1]], %[[msk]]  : vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[msk:.*]] = urem <4 x i32> %[[arg2]], <i32 32, i32 32, i32 32, i32 32>
+! CHECK: %7 = shl <4 x i32> %[[arg1]], %[[msk]]
+end subroutine vec_sl_i4
+
+! CHECK-LABEL: vec_sl_i8
+subroutine vec_sl_i8(arg1, arg2)
+  vector(integer(8)) :: arg1, r
+  vector(unsigned(8)) :: arg2
+  r = vec_sl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:i64>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<2:i64>) -> vector<2xi64>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! CHECK-FIR: %c64_i64 = arith.constant 64 : i64
+! CHECK-FIR: %[[cv:.*]] = vector.broadcast %c64_i64 : i64 to vector<2xi64>
+! CHECK-FIR: %[[msk:.*]] = arith.remui %[[varg2]], %[[cv]] : vector<2xi64>
+! CHECK-FIR: %[[r:.*]] = arith.shli %[[varg1]], %[[msk]] : vector<2xi64>
+! CHECK-FIR: %{{[0-9]}} = fir.convert %[[r]] : (vector<2xi64>) -> !fir.vector<2:i64>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[cv:.*]] = llvm.mlir.constant(dense<64> : vector<2xi64>) : vector<2xi64>
+! CHECK-LLVMIR: %[[msk:.*]] = llvm.urem %[[arg2]], %[[cv]]  : vector<2xi64>
+! CHECK-LLVMIR: %{{[0-9]}} = llvm.shl %[[arg1]], %[[msk]]  : vector<2xi64>
+
+! CHECK: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[msk:.*]] = urem <2 x i64> %[[arg2]], <i64 64, i64 64>
+! CHECK: %7 = shl <2 x i64> %[[arg1]], %[[msk]]
+end subroutine vec_sl_i8
+
+! CHECK-LABEL: vec_sl_u1
+subroutine vec_sl_u1(arg1, arg2)
+  vector(unsigned(1)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %c8_i8 = arith.constant 8 : i8
+! CHECK-FIR: %[[cv:.*]] = vector.broadcast %c8_i8 : i8 to vector<16xi8>
+! CHECK-FIR: %[[msk:.*]] = arith.remui %[[varg2]], %[[cv]] : vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = arith.shli %[[varg1]], %[[msk]] : vector<16xi8>
+! CHECK-FIR: %{{[0-9]}} = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[cv:.*]] = llvm.mlir.constant(dense<8> : vector<16xi8>) : vector<16xi8>
+! CHECK-LLVMIR: %[[msk:.*]] = llvm.urem %[[arg2]], %[[cv]]  : vector<16xi8>
+! CHECK-LLVMIR: %{{[0-9]}} = llvm.shl %[[arg1]], %[[msk]]  : vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[msk:.*]] = urem <16 x i8> %[[arg2]], <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+! CHECK: %7 = shl <16 x i8> %[[arg1]], %[[msk]]
+end subroutine vec_sl_u1
+
+! CHECK-LABEL: vec_sl_u2
+subroutine vec_sl_u2(arg1, arg2)
+  vector(unsigned(2)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %c16_i16 = arith.constant 16 : i16
+! CHECK-FIR: %[[cv:.*]] = vector.broadcast %c16_i16 : i16 to vector<8xi16>
+! CHECK-FIR: %[[msk:.*]] = arith.remui %[[varg2]], %[[cv]] : vector<8xi16>
+! CHECK-FIR: %[[r:.*]] = arith.shli %[[varg1]], %[[msk]] : vector<8xi16>
+! CHECK-FIR: %{{[0-9]}} = fir.convert %[[r]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[cv:.*]] = llvm.mlir.constant(dense<16> : vector<8xi16>) : vector<8xi16>
+! CHECK-LLVMIR: %[[msk:.*]] = llvm.urem %[[arg2]], %[[cv]]  : vector<8xi16>
+! CHECK-LLVMIR: %{{[0-9]}} = llvm.shl %[[arg1]], %[[msk]]  : vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[msk:.*]] = urem <8 x i16> %[[arg2]], <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+! CHECK: %7 = shl <8 x i16> %[[arg1]], %[[msk]]
+end subroutine vec_sl_u2
+
+! CHECK-LABEL: vec_sl_u4
+subroutine vec_sl_u4(arg1, arg2)
+  vector(unsigned(4)) :: arg1, r
+  vector(unsigned(4)) :: arg2
+  r = vec_sl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %c32_i32 = arith.constant 32 : i32
+! CHECK-FIR: %[[cv:.*]] = vector.broadcast %c32_i32 : i32 to vector<4xi32>
+! CHECK-FIR: %[[msk:.*]] = arith.remui %[[varg2]], %[[cv]] : vector<4xi32>
+! CHECK-FIR: %[[r:.*]] = arith.shli %[[varg1]], %[[msk]] : vector<4xi32>
+! CHECK-FIR: %{{[0-9]}} = fir.convert %[[r]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[cv:.*]] = llvm.mlir.constant(dense<32> : vector<4xi32>) : vector<4xi32>
+! CHECK-LLVMIR: %[[msk:.*]] = llvm.urem %[[arg2]], %[[cv]]  : vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]}} = llvm.shl %[[arg1]], %[[msk]]  : vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[msk:.*]] = urem <4 x i32> %[[arg2]], <i32 32, i32 32, i32 32, i32 32>
+! CHECK: %7 = shl <4 x i32> %[[arg1]], %[[msk]]
+end subroutine vec_sl_u4
+
+! CHECK-LABEL: vec_sl_u8
+subroutine vec_sl_u8(arg1, arg2)
+  vector(unsigned(8)) :: arg1, r
+  vector(unsigned(8)) :: arg2
+  r = vec_sl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! CHECK-FIR: %c64_i64 = arith.constant 64 : i64
+! CHECK-FIR: %[[cv:.*]] = vector.broadcast %c64_i64 : i64 to vector<2xi64>
+! CHECK-FIR: %[[msk:.*]] = arith.remui %[[varg2]], %[[cv]] : vector<2xi64>
+! CHECK-FIR: %[[r:.*]] = arith.shli %[[varg1]], %[[msk]] : vector<2xi64>
+! CHECK-FIR: %{{[0-9]}} = fir.convert %[[r]] : (vector<2xi64>) -> !fir.vector<2:ui64>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[cv:.*]] = llvm.mlir.constant(dense<64> : vector<2xi64>) : vector<2xi64>
+! CHECK-LLVMIR: %[[msk:.*]] = llvm.urem %[[arg2]], %[[cv]]  : vector<2xi64>
+! CHECK-LLVMIR: %{{[0-9]}} = llvm.shl %[[arg1]], %[[msk]]  : vector<2xi64>
+
+! CHECK: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[msk:.*]] = urem <2 x i64> %[[arg2]], <i64 64, i64 64>
+! CHECK: %{{[0-9]+}} = shl <2 x i64> %[[arg1]], %[[msk]]
+end subroutine vec_sl_u8
+
+!----------------------
+! vec_sll
+!----------------------
+! CHECK-LABEL: vec_sll_i1u1
+subroutine vec_sll_i1u1(arg1, arg2)
+  vector(integer(1)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:i8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_sll_i1u1
+
+! CHECK-LABEL: vec_sll_i2u1
+subroutine vec_sll_i2u1(arg1, arg2)
+  vector(integer(2)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:i16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_sll_i2u1
+
+! CHECK-LABEL: vec_sll_i4u1
+subroutine vec_sll_i4u1(arg1, arg2)
+  vector(integer(4)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsl(%[[arg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[arg1]], <4 x i32> %[[varg2]])
+end subroutine vec_sll_i4u1
+
+! CHECK-LABEL: vec_sll_i1u2
+subroutine vec_sll_i1u2(arg1, arg2)
+  vector(integer(1)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:i8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR:    %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_sll_i1u2
+
+! CHECK-LABEL: vec_sll_i2u2
+subroutine vec_sll_i2u2(arg1, arg2)
+  vector(integer(2)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:i16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_sll_i2u2
+
+! CHECK-LABEL: vec_sll_i4u2
+subroutine vec_sll_i4u2(arg1, arg2)
+  vector(integer(4)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsl(%[[arg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[arg1]], <4 x i32> %[[varg2]])
+end subroutine vec_sll_i4u2
+
+! CHECK-LABEL: vec_sll_i1u4
+subroutine vec_sll_i1u4(arg1, arg2)
+  vector(integer(1)) :: arg1, r
+  vector(unsigned(4)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[bc1]], %[[varg2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:i8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[arg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR:    %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[varg1]], <4 x i32> %[[arg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_sll_i1u4
+
+! CHECK-LABEL: vec_sll_i2u4
+subroutine vec_sll_i2u4(arg1, arg2)
+  vector(integer(2)) :: arg1, r
+  vector(unsigned(4)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[bc1]], %[[varg2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:i16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[arg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[varg1]], <4 x i32> %[[arg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_sll_i2u4
+
+! CHECK-LABEL: vec_sll_i4u4
+subroutine vec_sll_i4u4(arg1, arg2)
+  vector(integer(4)) :: arg1, r
+  vector(unsigned(4)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[varg2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsl(%[[arg1]], %[[arg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]])
+end subroutine vec_sll_i4u4
+
+! CHECK-LABEL: vec_sll_u1u1
+subroutine vec_sll_u1u1(arg1, arg2)
+  vector(unsigned(1)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_sll_u1u1
+
+! CHECK-LABEL: vec_sll_u2u1
+subroutine vec_sll_u2u1(arg1, arg2)
+  vector(unsigned(2)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_sll_u2u1
+
+! CHECK-LABEL: vec_sll_u4u1
+subroutine vec_sll_u4u1(arg1, arg2)
+  vector(unsigned(4)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<4xi32>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.call @llvm.ppc.altivec.vsl(%[[arg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[arg1]], <4 x i32> %[[varg2]])
+end subroutine vec_sll_u4u1
+
+! CHECK-LABEL: vec_sll_u1u2
+subroutine vec_sll_u1u2(arg1, arg2)
+  vector(unsigned(1)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_sll_u1u2
+
+! CHECK-LABEL: vec_sll_u2u2
+subroutine vec_sll_u2u2(arg1, arg2)
+  vector(unsigned(2)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_sll_u2u2
+
+! CHECK-LABEL: vec_sll_u4u2
+subroutine vec_sll_u4u2(arg1, arg2)
+  vector(unsigned(4)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<4xi32>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.call @llvm.ppc.altivec.vsl(%[[arg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[arg1]], <4 x i32> %[[varg2]])
+end subroutine vec_sll_u4u2
+
+! CHECK-LABEL: vec_sll_u1u4
+subroutine vec_sll_u1u4(arg1, arg2)
+  vector(unsigned(1)) :: arg1, r
+  vector(unsigned(4)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[bc1]], %[[varg2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[arg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[varg1]], <4 x i32> %[[arg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_sll_u1u4
+
+! CHECK-LABEL: vec_sll_u2u4
+subroutine vec_sll_u2u4(arg1, arg2)
+  vector(unsigned(2)) :: arg1, r
+  vector(unsigned(4)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[bc1]], %[[varg2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[arg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[varg1]], <4 x i32> %[[arg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_sll_u2u4
+
+! CHECK-LABEL: vec_sll_u4u4
+subroutine vec_sll_u4u4(arg1, arg2)
+  vector(unsigned(4)) :: arg1, r
+  vector(unsigned(4)) :: arg2
+  r = vec_sll(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsl(%[[varg1]], %[[varg2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<4xi32>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.call @llvm.ppc.altivec.vsl(%[[arg1]], %[[arg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ppc.altivec.vsl(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]])
+end subroutine vec_sll_u4u4
+
+!----------------------
+! vec_slo
+!----------------------
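+! Note: vec_slo shifts the whole 128-bit vector left by octets, with the
+! count taken from arg2. As the checks below illustrate, the lowering
+! bitcasts both operands to <4 x i32>, calls @llvm.ppc.altivec.vslo, and
+! bitcasts the result back to the type of arg1 where needed.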
+
+! CHECK-LABEL: vec_slo_i1u1
+subroutine vec_slo_i1u1(arg1, arg2)
+  vector(integer(1)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_slo(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vslo(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:i8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vslo(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vslo(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_slo_i1u1
+
+! CHECK-LABEL: vec_slo_i2u1
+subroutine vec_slo_i2u1(arg1, arg2)
+  vector(integer(2)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_slo(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vslo(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:i16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vslo(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vslo(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_slo_i2u1
+
+! CHECK-LABEL: vec_slo_i4u1
+subroutine vec_slo_i4u1(arg1, arg2)
+  vector(integer(4)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_slo(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %{{[0-9]+}} = fir.call @llvm.ppc.altivec.vslo(%[[varg1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.call @llvm.ppc.altivec.vslo(%[[arg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ppc.altivec.vslo(<4 x i32> %[[arg1]], <4 x i32> %[[varg2]])
+end subroutine vec_slo_i4u1
+
+! CHECK-LABEL: vec_slo_u1u1
+subroutine vec_slo_u1u1(arg1, arg2)
+  vector(unsigned(1)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_slo(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vslo(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vslo(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vslo(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_slo_u1u1
+
+! CHECK-LABEL: vec_slo_u2u1
+subroutine vec_slo_u2u1(arg1, arg2)
+  vector(unsigned(2)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_slo(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vslo(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vslo(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vslo(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_slo_u2u1
+
+! CHECK-LABEL: vec_slo_u4u1
+subroutine vec_slo_u4u1(arg1, arg2)
+  vector(unsigned(4)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_slo(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vslo(%[[varg1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<4xi32>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.call @llvm.ppc.altivec.vslo(%[[arg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ppc.altivec.vslo(<4 x i32> %[[arg1]], <4 x i32> %[[varg2]])
+end subroutine vec_slo_u4u1
+
+! CHECK-LABEL: vec_slo_r4u1
+subroutine vec_slo_r4u1(arg1, arg2)
+  vector(real(4)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_slo(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<4xf32> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vslo(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<4xf32>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<4xf32>) -> !fir.vector<4:f32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xf32> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vslo(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<4xf32>
+
+! CHECK: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <4 x float> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vslo(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <4 x float>
+end subroutine vec_slo_r4u1
+
+! CHECK-LABEL: vec_slo_i1u2
+subroutine vec_slo_i1u2(arg1, arg2)
+  vector(integer(1)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_slo(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vslo(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:i8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vslo(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vslo(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_slo_i1u2
+
+! CHECK-LABEL: vec_slo_i2u2
+subroutine vec_slo_i2u2(arg1, arg2)
+  vector(integer(2)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_slo(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vslo(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:i16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vslo(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vslo(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_slo_i2u2
+
+! CHECK-LABEL: vec_slo_i4u2
+subroutine vec_slo_i4u2(arg1, arg2)
+  vector(integer(4)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_slo(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %{{[0-9]+}} = fir.call @llvm.ppc.altivec.vslo(%[[varg1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.call @llvm.ppc.altivec.vslo(%[[arg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ppc.altivec.vslo(<4 x i32> %[[arg1]], <4 x i32> %[[varg2]])
+end subroutine vec_slo_i4u2
+
+! CHECK-LABEL: vec_slo_u1u2
+subroutine vec_slo_u1u2(arg1, arg2)
+  vector(unsigned(1)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_slo(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vslo(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vslo(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vslo(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_slo_u1u2
+
+! CHECK-LABEL: vec_slo_u2u2
+subroutine vec_slo_u2u2(arg1, arg2)
+  vector(unsigned(2)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_slo(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vslo(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vslo(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vslo(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_slo_u2u2
+
+! CHECK-LABEL: vec_slo_u4u2
+subroutine vec_slo_u4u2(arg1, arg2)
+  vector(unsigned(4)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_slo(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vslo(%[[varg1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<4xi32>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.call @llvm.ppc.altivec.vslo(%[[arg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ppc.altivec.vslo(<4 x i32> %[[arg1]], <4 x i32> %[[varg2]])
+end subroutine vec_slo_u4u2
+
+! CHECK-LABEL: vec_slo_r4u2
+subroutine vec_slo_r4u2(arg1, arg2)
+  vector(real(4)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_slo(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<4xf32> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vslo(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<4xf32>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<4xf32>) -> !fir.vector<4:f32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xf32> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vslo(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<4xf32>
+
+! CHECK: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <4 x float> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vslo(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <4 x float>
+end subroutine vec_slo_r4u2
+
+!----------------------
+! vec_sr
+!----------------------
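+! Note: vec_sr is elementwise. As the checks below illustrate, the shift
+! count is first reduced modulo the element bit width (arith.remui / urem)
+! and each element is then logically shifted right (arith.shrui / lshr);
+! roughly r(i) = shiftr(arg1(i), mod(arg2(i), bits)), where bits stands in
+! for the element width.
+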
+! CHECK-LABEL: vec_sr_i1
+subroutine vec_sr_i1(arg1, arg2)
+  vector(integer(1)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sr(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %c8_i8 = arith.constant 8 : i8
+! CHECK-FIR: %[[cv:.*]] = vector.broadcast %c8_i8 : i8 to vector<16xi8>
+! CHECK-FIR: %[[msk:.*]] = arith.remui %[[varg2]], %[[cv]] : vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = arith.shrui %[[varg1]], %[[msk]] : vector<16xi8>
+! CHECK-FIR: %{{[0-9]}} = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:i8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[cv:.*]] = llvm.mlir.constant(dense<8> : vector<16xi8>) : vector<16xi8>
+! CHECK-LLVMIR: %[[msk:.*]] = llvm.urem %[[arg2]], %[[cv]]  : vector<16xi8>
+! CHECK-LLVMIR: %{{[0-9]}} = llvm.lshr %[[arg1]], %[[msk]]  : vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[msk:.*]] = urem <16 x i8> %[[arg2]], <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+! CHECK: %{{[0-9]+}} = lshr <16 x i8> %[[arg1]], %[[msk]]
+end subroutine vec_sr_i1
+
+! CHECK-LABEL: vec_sr_i2
+subroutine vec_sr_i2(arg1, arg2)
+  vector(integer(2)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sr(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %c16_i16 = arith.constant 16 : i16
+! CHECK-FIR: %[[cv:.*]] = vector.broadcast %c16_i16 : i16 to vector<8xi16>
+! CHECK-FIR: %[[msk:.*]] = arith.remui %[[varg2]], %[[cv]] : vector<8xi16>
+! CHECK-FIR: %[[r:.*]] = arith.shrui %[[varg1]], %[[msk]] : vector<8xi16>
+! CHECK-FIR: %{{[0-9]}} = fir.convert %[[r]] : (vector<8xi16>) -> !fir.vector<8:i16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[cv:.*]] = llvm.mlir.constant(dense<16> : vector<8xi16>) : vector<8xi16>
+! CHECK-LLVMIR: %[[msk:.*]] = llvm.urem %[[arg2]], %[[cv]]  : vector<8xi16>
+! CHECK-LLVMIR: %{{[0-9]}} = llvm.lshr %[[arg1]], %[[msk]]  : vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[msk:.*]] = urem <8 x i16> %[[arg2]], <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+! CHECK: %{{[0-9]+}} = lshr <8 x i16> %[[arg1]], %[[msk]]
+end subroutine vec_sr_i2
+
+! CHECK-LABEL: vec_sr_i4
+subroutine vec_sr_i4(arg1, arg2)
+  vector(integer(4)) :: arg1, r
+  vector(unsigned(4)) :: arg2
+  r = vec_sr(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %c32_i32 = arith.constant 32 : i32
+! CHECK-FIR: %[[cv:.*]] = vector.broadcast %c32_i32 : i32 to vector<4xi32>
+! CHECK-FIR: %[[msk:.*]] = arith.remui %[[varg2]], %[[cv]] : vector<4xi32>
+! CHECK-FIR: %[[r:.*]] = arith.shrui %[[varg1]], %[[msk]] : vector<4xi32>
+! CHECK-FIR: %{{[0-9]}} = fir.convert %[[r]] : (vector<4xi32>) -> !fir.vector<4:i32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[cv:.*]] = llvm.mlir.constant(dense<32> : vector<4xi32>) : vector<4xi32>
+! CHECK-LLVMIR: %[[msk:.*]] = llvm.urem %[[arg2]], %[[cv]]  : vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]}} = llvm.lshr %[[arg1]], %[[msk]]  : vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[msk:.*]] = urem <4 x i32> %[[arg2]], <i32 32, i32 32, i32 32, i32 32>
+! CHECK: %{{[0-9]+}} = lshr <4 x i32> %[[arg1]], %[[msk]]
+end subroutine vec_sr_i4
+
+! CHECK-LABEL: vec_sr_i8
+subroutine vec_sr_i8(arg1, arg2)
+  vector(integer(8)) :: arg1, r
+  vector(unsigned(8)) :: arg2
+  r = vec_sr(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:i64>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<2:i64>) -> vector<2xi64>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! CHECK-FIR: %c64_i64 = arith.constant 64 : i64
+! CHECK-FIR: %[[cv:.*]] = vector.broadcast %c64_i64 : i64 to vector<2xi64>
+! CHECK-FIR: %[[msk:.*]] = arith.remui %[[varg2]], %[[cv]] : vector<2xi64>
+! CHECK-FIR: %[[r:.*]] = arith.shrui %[[varg1]], %[[msk]] : vector<2xi64>
+! CHECK-FIR: %{{[0-9]}} = fir.convert %[[r]] : (vector<2xi64>) -> !fir.vector<2:i64>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[cv:.*]] = llvm.mlir.constant(dense<64> : vector<2xi64>) : vector<2xi64>
+! CHECK-LLVMIR: %[[msk:.*]] = llvm.urem %[[arg2]], %[[cv]]  : vector<2xi64>
+! CHECK-LLVMIR: %{{[0-9]}} = llvm.lshr %[[arg1]], %[[msk]]  : vector<2xi64>
+
+! CHECK: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[msk:.*]] = urem <2 x i64> %[[arg2]], <i64 64, i64 64>
+! CHECK: %{{[0-9]+}} = lshr <2 x i64> %[[arg1]], %[[msk]]
+end subroutine vec_sr_i8
+
+! CHECK-LABEL: vec_sr_u1
+subroutine vec_sr_u1(arg1, arg2)
+  vector(unsigned(1)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sr(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %c8_i8 = arith.constant 8 : i8
+! CHECK-FIR: %[[cv:.*]] = vector.broadcast %c8_i8 : i8 to vector<16xi8>
+! CHECK-FIR: %[[msk:.*]] = arith.remui %[[varg2]], %[[cv]] : vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = arith.shrui %[[varg1]], %[[msk]] : vector<16xi8>
+! CHECK-FIR: %{{[0-9]}} = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[cv:.*]] = llvm.mlir.constant(dense<8> : vector<16xi8>) : vector<16xi8>
+! CHECK-LLVMIR: %[[msk:.*]] = llvm.urem %[[arg2]], %[[cv]]  : vector<16xi8>
+! CHECK-LLVMIR: %{{[0-9]}} = llvm.lshr %[[arg1]], %[[msk]]  : vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[msk:.*]] = urem <16 x i8> %[[arg2]], <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+! CHECK: %{{[0-9]+}} = lshr <16 x i8> %[[arg1]], %[[msk]]
+end subroutine vec_sr_u1
+
+! CHECK-LABEL: vec_sr_u2
+subroutine vec_sr_u2(arg1, arg2)
+  vector(unsigned(2)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sr(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %c16_i16 = arith.constant 16 : i16
+! CHECK-FIR: %[[cv:.*]] = vector.broadcast %c16_i16 : i16 to vector<8xi16>
+! CHECK-FIR: %[[msk:.*]] = arith.remui %[[varg2]], %[[cv]] : vector<8xi16>
+! CHECK-FIR: %[[r:.*]] = arith.shrui %[[varg1]], %[[msk]] : vector<8xi16>
+! CHECK-FIR: %{{[0-9]}} = fir.convert %[[r]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[cv:.*]] = llvm.mlir.constant(dense<16> : vector<8xi16>) : vector<8xi16>
+! CHECK-LLVMIR: %[[msk:.*]] = llvm.urem %[[arg2]], %[[cv]]  : vector<8xi16>
+! CHECK-LLVMIR: %{{[0-9]}} = llvm.lshr %[[arg1]], %[[msk]]  : vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[msk:.*]] = urem <8 x i16> %[[arg2]], <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+! CHECK: %{{[0-9]+}} = lshr <8 x i16> %[[arg1]], %[[msk]]
+end subroutine vec_sr_u2
+
+! CHECK-LABEL: vec_sr_u4
+subroutine vec_sr_u4(arg1, arg2)
+  vector(unsigned(4)) :: arg1, r
+  vector(unsigned(4)) :: arg2
+  r = vec_sr(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %c32_i32 = arith.constant 32 : i32
+! CHECK-FIR: %[[cv:.*]] = vector.broadcast %c32_i32 : i32 to vector<4xi32>
+! CHECK-FIR: %[[msk:.*]] = arith.remui %[[varg2]], %[[cv]] : vector<4xi32>
+! CHECK-FIR: %[[r:.*]] = arith.shrui %[[varg1]], %[[msk]] : vector<4xi32>
+! CHECK-FIR: %{{[0-9]}} = fir.convert %[[r]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[cv:.*]] = llvm.mlir.constant(dense<32> : vector<4xi32>) : vector<4xi32>
+! CHECK-LLVMIR: %[[msk:.*]] = llvm.urem %[[arg2]], %[[cv]]  : vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]}} = llvm.lshr %[[arg1]], %[[msk]]  : vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[msk:.*]] = urem <4 x i32> %[[arg2]], <i32 32, i32 32, i32 32, i32 32>
+! CHECK: %{{[0-9]+}} = lshr <4 x i32> %[[arg1]], %[[msk]]
+end subroutine vec_sr_u4
+
+! CHECK-LABEL: vec_sr_u8
+subroutine vec_sr_u8(arg1, arg2)
+  vector(unsigned(8)) :: arg1, r
+  vector(unsigned(8)) :: arg2
+  r = vec_sr(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! CHECK-FIR: %c64_i64 = arith.constant 64 : i64
+! CHECK-FIR: %[[cv:.*]] = vector.broadcast %c64_i64 : i64 to vector<2xi64>
+! CHECK-FIR: %[[msk:.*]] = arith.remui %[[varg2]], %[[cv]] : vector<2xi64>
+! CHECK-FIR: %[[r:.*]] = arith.shrui %[[varg1]], %[[msk]] : vector<2xi64>
+! CHECK-FIR: %{{[0-9]}} = fir.convert %[[r]] : (vector<2xi64>) -> !fir.vector<2:ui64>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[cv:.*]] = llvm.mlir.constant(dense<64> : vector<2xi64>) : vector<2xi64>
+! CHECK-LLVMIR: %[[msk:.*]] = llvm.urem %[[arg2]], %[[cv]]  : vector<2xi64>
+! CHECK-LLVMIR: %{{[0-9]}} = llvm.lshr %[[arg1]], %[[msk]]  : vector<2xi64>
+
+! CHECK: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[msk:.*]] = urem <2 x i64> %[[arg2]], <i64 64, i64 64>
+! CHECK: %{{[0-9]+}} = lshr <2 x i64> %[[arg1]], %[[msk]]
+end subroutine vec_sr_u8
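+
+! As the checks above show, vec_sr is element-wise: each element of arg1 is
+! logically shifted right by the matching element of arg2 taken modulo the
+! element width, hence the urem followed by lshr. A minimal usage sketch
+! (illustrative only, hypothetical variable names):
+!
+!   vector(unsigned(4)) :: a, sh, res
+!   res = vec_sr(a, sh)  ! each element of a shifted right by modulo(sh, 32) bits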
+
+!----------------------
+! vec_srl
+!----------------------
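+! Unlike vec_sr, vec_srl shifts the entire 128-bit vector right by a bit count
+! taken from arg2; every kind combination below lowers to the same
+! llvm.ppc.altivec.vsr call, with both operands bitcast to vector<4xi32> and
+! the result bitcast back to the argument type. A minimal usage sketch
+! (illustrative only, hypothetical variable names):
+!
+!   vector(integer(1)) :: v, res
+!   vector(unsigned(1)) :: nbits
+!   res = vec_srl(v, nbits)  ! lowers to a call to llvm.ppc.altivec.vsr
+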
+! CHECK-LABEL: vec_srl_i1u1
+subroutine vec_srl_i1u1(arg1, arg2)
+  vector(integer(1)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:i8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_srl_i1u1
+
+! CHECK-LABEL: vec_srl_i2u1
+subroutine vec_srl_i2u1(arg1, arg2)
+  vector(integer(2)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:i16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_srl_i2u1
+
+! CHECK-LABEL: vec_srl_i4u1
+subroutine vec_srl_i4u1(arg1, arg2)
+  vector(integer(4)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsr(%[[arg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[arg1]], <4 x i32> %[[varg2]])
+end subroutine vec_srl_i4u1
+
+! CHECK-LABEL: vec_srl_i1u2
+subroutine vec_srl_i1u2(arg1, arg2)
+  vector(integer(1)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:i8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_srl_i1u2
+
+! CHECK-LABEL: vec_srl_i2u2
+subroutine vec_srl_i2u2(arg1, arg2)
+  vector(integer(2)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:i16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_srl_i2u2
+
+! CHECK-LABEL: vec_srl_i4u2
+subroutine vec_srl_i4u2(arg1, arg2)
+  vector(integer(4)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsr(%[[arg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[arg1]], <4 x i32> %[[varg2]])
+end subroutine vec_srl_i4u2
+
+! CHECK-LABEL: vec_srl_i1u4
+subroutine vec_srl_i1u4(arg1, arg2)
+  vector(integer(1)) :: arg1, r
+  vector(unsigned(4)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[bc1]], %[[varg2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:i8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[arg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[varg1]], <4 x i32> %[[arg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_srl_i1u4
+
+! CHECK-LABEL: vec_srl_i2u4
+subroutine vec_srl_i2u4(arg1, arg2)
+  vector(integer(2)) :: arg1, r
+  vector(unsigned(4)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[bc1]], %[[varg2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:i16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[arg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[varg1]], <4 x i32> %[[arg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_srl_i2u4
+
+! CHECK-LABEL: vec_srl_i4u4
+subroutine vec_srl_i4u4(arg1, arg2)
+  vector(integer(4)) :: arg1, r
+  vector(unsigned(4)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[varg2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsr(%[[arg1]], %[[arg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]])
+end subroutine vec_srl_i4u4
+
+! CHECK-LABEL: vec_srl_u1u1
+subroutine vec_srl_u1u1(arg1, arg2)
+  vector(unsigned(1)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_srl_u1u1
+
+! CHECK-LABEL: vec_srl_u2u1
+subroutine vec_srl_u2u1(arg1, arg2)
+  vector(unsigned(2)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_srl_u2u1
+
+! CHECK-LABEL: vec_srl_u4u1
+subroutine vec_srl_u4u1(arg1, arg2)
+  vector(unsigned(4)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<4xi32>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.call @llvm.ppc.altivec.vsr(%[[arg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[arg1]], <4 x i32> %[[varg2]])
+end subroutine vec_srl_u4u1
+
+! CHECK-LABEL: vec_srl_u1u2
+subroutine vec_srl_u1u2(arg1, arg2)
+  vector(unsigned(1)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_srl_u1u2
+
+! CHECK-LABEL: vec_srl_u2u2
+subroutine vec_srl_u2u2(arg1, arg2)
+  vector(unsigned(2)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_srl_u2u2
+
+! CHECK-LABEL: vec_srl_u4u2
+subroutine vec_srl_u4u2(arg1, arg2)
+  vector(unsigned(4)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<4xi32>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.call @llvm.ppc.altivec.vsr(%[[arg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[arg1]], <4 x i32> %[[varg2]])
+end subroutine vec_srl_u4u2
+
+! CHECK-LABEL: vec_srl_u1u4
+subroutine vec_srl_u1u4(arg1, arg2)
+  vector(unsigned(1)) :: arg1, r
+  vector(unsigned(4)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[bc1]], %[[varg2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[arg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[varg1]], <4 x i32> %[[arg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_srl_u1u4
+
+! CHECK-LABEL: vec_srl_u2u4
+subroutine vec_srl_u2u4(arg1, arg2)
+  vector(unsigned(2)) :: arg1, r
+  vector(unsigned(4)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[bc1]], %[[varg2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[arg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[varg1]], <4 x i32> %[[arg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_srl_u2u4
+
+! CHECK-LABEL: vec_srl_u4u4
+subroutine vec_srl_u4u4(arg1, arg2)
+  vector(unsigned(4)) :: arg1, r
+  vector(unsigned(4)) :: arg2
+  r = vec_srl(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsr(%[[varg1]], %[[varg2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<4xi32>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.call @llvm.ppc.altivec.vsr(%[[arg1]], %[[arg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]])
+end subroutine vec_srl_u4u4
+
+!----------------------
+! vec_sro
+!----------------------
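+! vec_sro shifts the entire 128-bit vector right by whole octets (bytes)
+! rather than bits; the checks below expect a call to llvm.ppc.altivec.vsro
+! with the same bitcast-to-vector<4xi32> pattern used for vec_srl. A minimal
+! usage sketch (illustrative only, hypothetical variable names):
+!
+!   vector(real(4)) :: v, res
+!   vector(unsigned(1)) :: noctets
+!   res = vec_sro(v, noctets)  ! lowers to a call to llvm.ppc.altivec.vsro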
+
+! CHECK-LABEL: vec_sro_i1u1
+subroutine vec_sro_i1u1(arg1, arg2)
+  vector(integer(1)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sro(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsro(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:i8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsro(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_sro_i1u1
+
+! CHECK-LABEL: vec_sro_i2u1
+subroutine vec_sro_i2u1(arg1, arg2)
+  vector(integer(2)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sro(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsro(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:i16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsro(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_sro_i2u1
+
+! CHECK-LABEL: vec_sro_i4u1
+subroutine vec_sro_i4u1(arg1, arg2)
+  vector(integer(4)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sro(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %{{[0-9]+}} = fir.call @llvm.ppc.altivec.vsro(%[[varg1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.call @llvm.ppc.altivec.vsro(%[[arg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %[[arg1]], <4 x i32> %[[varg2]])
+end subroutine vec_sro_i4u1
+
+! CHECK-LABEL: vec_sro_u1u1
+subroutine vec_sro_u1u1(arg1, arg2)
+  vector(unsigned(1)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sro(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsro(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsro(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_sro_u1u1
+
+! CHECK-LABEL: vec_sro_u2u1
+subroutine vec_sro_u2u1(arg1, arg2)
+  vector(unsigned(2)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sro(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsro(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsro(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_sro_u2u1
+
+! CHECK-LABEL: vec_sro_u4u1
+subroutine vec_sro_u4u1(arg1, arg2)
+  vector(unsigned(4)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sro(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsro(%[[varg1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<4xi32>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.call @llvm.ppc.altivec.vsro(%[[arg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %[[arg1]], <4 x i32> %[[varg2]])
+end subroutine vec_sro_u4u1
+
+! CHECK-LABEL: vec_sro_r4u1
+subroutine vec_sro_r4u1(arg1, arg2)
+  vector(real(4)) :: arg1, r
+  vector(unsigned(1)) :: arg2
+  r = vec_sro(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<4xf32> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsro(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<4xf32>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<4xf32>) -> !fir.vector<4:f32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xf32> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsro(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<4xf32>
+
+! CHECK: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <4 x float> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <16 x i8> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <4 x float>
+end subroutine vec_sro_r4u1
+
+!-------------------------------------
+
+! CHECK-LABEL: vec_sro_i1u2
+subroutine vec_sro_i1u2(arg1, arg2)
+  vector(integer(1)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sro(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsro(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:i8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsro(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_sro_i1u2
+
+! CHECK-LABEL: vec_sro_i2u2
+subroutine vec_sro_i2u2(arg1, arg2)
+  vector(integer(2)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sro(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsro(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:i16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsro(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_sro_i2u2
+
+! CHECK-LABEL: vec_sro_i4u2
+subroutine vec_sro_i4u2(arg1, arg2)
+  vector(integer(4)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sro(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %{{[0-9]+}} = fir.call @llvm.ppc.altivec.vsro(%[[varg1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.call @llvm.ppc.altivec.vsro(%[[arg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %[[arg1]], <4 x i32> %[[varg2]])
+end subroutine vec_sro_i4u2
+
+! CHECK-LABEL: vec_sro_u1u2
+subroutine vec_sro_u1u2(arg1, arg2)
+  vector(unsigned(1)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sro(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsro(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsro(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<16xi8>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <16 x i8>
+end subroutine vec_sro_u1u2
+
+! CHECK-LABEL: vec_sro_u2u2
+subroutine vec_sro_u2u2(arg1, arg2)
+  vector(unsigned(2)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sro(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsro(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<8xi16>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsro(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<8xi16>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <8 x i16>
+end subroutine vec_sro_u2u2
+
+! CHECK-LABEL: vec_sro_u4u2
+subroutine vec_sro_u4u2(arg1, arg2)
+  vector(unsigned(4)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sro(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsro(%[[varg1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<4xi32>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.call @llvm.ppc.altivec.vsro(%[[arg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %[[arg1]], <4 x i32> %[[varg2]])
+end subroutine vec_sro_u4u2
+
+! CHECK-LABEL: vec_sro_r4u2
+subroutine vec_sro_r4u2(arg1, arg2)
+  vector(real(4)) :: arg1, r
+  vector(unsigned(2)) :: arg2
+  r = vec_sro(arg1, arg2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[varg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[varg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[bc1:.*]] = vector.bitcast %[[varg1]] : vector<4xf32> to vector<4xi32>
+! CHECK-FIR: %[[bc2:.*]] = vector.bitcast %[[varg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-FIR: %[[res:.*]] = fir.call @llvm.ppc.altivec.vsro(%[[bc1]], %[[bc2]]) fastmath<contract> : (vector<4xi32>, vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: %[[vres:.*]] = fir.convert %[[res]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[bcres:.*]] = vector.bitcast %[[vres]] : vector<4xi32> to vector<4xf32>
+! CHECK-FIR: %{{[0-9]+}} = fir.convert %[[bcres]] : (vector<4xf32>) -> !fir.vector<4:f32>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load {{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[varg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xf32> to vector<4xi32>
+! CHECK-LLVMIR: %[[varg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<4xi32>
+! CHECK-LLVMIR: %[[res:.*]] = llvm.call @llvm.ppc.altivec.vsro(%[[varg1]], %[[varg2]]) {fastmathFlags = #llvm.fastmath<contract>} : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+! CHECK-LLVMIR: %{{[0-9]+}} = llvm.bitcast %[[res]] : vector<4xi32> to vector<4xf32>
+
+! CHECK: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[varg1:.*]] = bitcast <4 x float> %[[arg1]] to <4 x i32>
+! CHECK: %[[varg2:.*]] = bitcast <8 x i16> %[[arg2]] to <4 x i32>
+! CHECK: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %[[varg1]], <4 x i32> %[[varg2]])
+! CHECK: %{{[0-9]+}} = bitcast <4 x i32> %[[res]] to <4 x float>
+end subroutine vec_sro_r4u2
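+
+! Note: the vec_sro cases check the same lowering pattern: operands are
+! bitcast to <4 x i32> where their element type differs, @llvm.ppc.altivec.vsro
+! is called, and the result is bitcast back to the element type of the first
+! argument.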
+
+!----------------------
+! vec_sld
+!----------------------
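+! All vec_sld cases below use a shift count of 3, so the expected shufflevector
+! mask is [13, ..., 28], i.e. elements 16-3 through 31-3 of the (arg2, arg1)
+! byte concatenation.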
+
+! CHECK-LABEL: vec_sld_test_i1i1
+subroutine vec_sld_test_i1i1(arg1, arg2)
+  vector(integer(1)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[carg2]], %[[carg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:i8>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[arg2]], %[[arg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8> 
+! CHECK-LLVMIR: llvm.store %[[r]], %{{.*}} : !llvm.ptr<vector<16xi8>>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[arg2]], <16 x i8> %[[arg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: store <16 x i8> %[[r]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_i1i1
+
+! CHECK-LABEL: vec_sld_test_i1i2
+subroutine vec_sld_test_i1i2(arg1, arg2)
+  vector(integer(1)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[carg2]], %[[carg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:i8>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[arg2]], %[[arg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8> 
+! CHECK-LLVMIR: llvm.store %[[r]], %{{.*}} : !llvm.ptr<vector<16xi8>>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[arg2]], <16 x i8> %[[arg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: store <16 x i8> %[[r]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_i1i2
+
+! CHECK-LABEL: vec_sld_test_i1i4
+subroutine vec_sld_test_i1i4(arg1, arg2)
+  vector(integer(1)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[carg2]], %[[carg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:i8>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[arg2]], %[[arg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8> 
+! CHECK-LLVMIR: llvm.store %[[r]], %{{.*}} : !llvm.ptr<vector<16xi8>>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[arg2]], <16 x i8> %[[arg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: store <16 x i8> %[[r]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_i1i4
+
+! CHECK-LABEL: vec_sld_test_i1i8
+subroutine vec_sld_test_i1i8(arg1, arg2)
+  vector(integer(1)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[carg2]], %[[carg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:i8>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[arg2]], %[[arg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8> 
+! CHECK-LLVMIR: llvm.store %[[r]], %{{.*}} : !llvm.ptr<vector<16xi8>>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[arg2]], <16 x i8> %[[arg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: store <16 x i8> %[[r]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_i1i8
+
+! CHECK-LABEL: vec_sld_test_i2i1
+subroutine vec_sld_test_i2i1(arg1, arg2)
+  vector(integer(2)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<8xi16>) -> !fir.vector<8:i16>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<8xi16>>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <8 x i16>
+! CHECK: store <8 x i16> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_i2i1
+
+! CHECK-LABEL: vec_sld_test_i2i2
+subroutine vec_sld_test_i2i2(arg1, arg2)
+  vector(integer(2)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<8xi16>) -> !fir.vector<8:i16>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<8xi16>>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <8 x i16>
+! CHECK: store <8 x i16> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_i2i2
+
+! CHECK-LABEL: vec_sld_test_i2i4
+subroutine vec_sld_test_i2i4(arg1, arg2)
+  vector(integer(2)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<8xi16>) -> !fir.vector<8:i16>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<8xi16>>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <8 x i16>
+! CHECK: store <8 x i16> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_i2i4
+
+! CHECK-LABEL: vec_sld_test_i2i8
+subroutine vec_sld_test_i2i8(arg1, arg2)
+  vector(integer(2)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<8xi16>) -> !fir.vector<8:i16>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<8xi16>>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <8 x i16>
+! CHECK: store <8 x i16> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_i2i8
+
+! CHECK-LABEL: vec_sld_test_i4i1
+subroutine vec_sld_test_i4i1(arg1, arg2)
+  vector(integer(4)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xi32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x i32> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x i32> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x i32>
+! CHECK: store <4 x i32> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_i4i1
+
+! CHECK-LABEL: vec_sld_test_i4i2
+subroutine vec_sld_test_i4i2(arg1, arg2)
+  vector(integer(4)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xi32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x i32> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x i32> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x i32>
+! CHECK: store <4 x i32> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_i4i2
+
+! CHECK-LABEL: vec_sld_test_i4i4
+subroutine vec_sld_test_i4i4(arg1, arg2)
+  vector(integer(4)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xi32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x i32> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x i32> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x i32>
+! CHECK: store <4 x i32> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_i4i4
+
+! CHECK-LABEL: vec_sld_test_i4i8
+subroutine vec_sld_test_i4i8(arg1, arg2)
+  vector(integer(4)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xi32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x i32> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x i32> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x i32>
+! CHECK: store <4 x i32> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_i4i8
+
+! CHECK-LABEL: vec_sld_test_u1i1
+subroutine vec_sld_test_u1i1(arg1, arg2)
+  vector(unsigned(1)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[carg2]], %[[carg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[arg2]], %[[arg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8> 
+! CHECK-LLVMIR: llvm.store %[[r]], %{{.*}} : !llvm.ptr<vector<16xi8>>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[arg2]], <16 x i8> %[[arg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: store <16 x i8> %[[r]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_u1i1
+
+! CHECK-LABEL: vec_sld_test_u1i2
+subroutine vec_sld_test_u1i2(arg1, arg2)
+  vector(unsigned(1)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[carg2]], %[[carg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[arg2]], %[[arg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8> 
+! CHECK-LLVMIR: llvm.store %[[r]], %{{.*}} : !llvm.ptr<vector<16xi8>>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[arg2]], <16 x i8> %[[arg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: store <16 x i8> %[[r]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_u1i2
+
+! CHECK-LABEL: vec_sld_test_u1i4
+subroutine vec_sld_test_u1i4(arg1, arg2)
+  vector(unsigned(1)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[carg2]], %[[carg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[arg2]], %[[arg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8> 
+! CHECK-LLVMIR: llvm.store %[[r]], %{{.*}} : !llvm.ptr<vector<16xi8>>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[arg2]], <16 x i8> %[[arg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: store <16 x i8> %[[r]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_u1i4
+
+! CHECK-LABEL: vec_sld_test_u1i8
+subroutine vec_sld_test_u1i8(arg1, arg2)
+  vector(unsigned(1)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[carg2]], %[[carg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[arg2]], %[[arg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8> 
+! CHECK-LLVMIR: llvm.store %[[r]], %{{.*}} : !llvm.ptr<vector<16xi8>>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[arg2]], <16 x i8> %[[arg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: store <16 x i8> %[[r]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_u1i8
+
+! CHECK-LABEL: vec_sld_test_u2i1
+subroutine vec_sld_test_u2i1(arg1, arg2)
+  vector(unsigned(2)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<8xi16>>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <8 x i16>
+! CHECK: store <8 x i16> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_u2i1
+
+! CHECK-LABEL: vec_sld_test_u2i2
+subroutine vec_sld_test_u2i2(arg1, arg2)
+  vector(unsigned(2)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<8xi16>>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <8 x i16>
+! CHECK: store <8 x i16> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_u2i2
+
+! CHECK-LABEL: vec_sld_test_u2i4
+subroutine vec_sld_test_u2i4(arg1, arg2)
+  vector(unsigned(2)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<8xi16>>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <8 x i16>
+! CHECK: store <8 x i16> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_u2i4
+
+! CHECK-LABEL: vec_sld_test_u2i8
+subroutine vec_sld_test_u2i8(arg1, arg2)
+  vector(unsigned(2)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<8xi16>>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <8 x i16>
+! CHECK: store <8 x i16> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_u2i8
+
+! CHECK-LABEL: vec_sld_test_u4i1
+subroutine vec_sld_test_u4i1(arg1, arg2)
+  vector(unsigned(4)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xi32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x i32> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x i32> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x i32>
+! CHECK: store <4 x i32> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_u4i1
+
+! CHECK-LABEL: vec_sld_test_u4i2
+subroutine vec_sld_test_u4i2(arg1, arg2)
+  vector(unsigned(4)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xi32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x i32> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x i32> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x i32>
+! CHECK: store <4 x i32> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_u4i2
+
+! CHECK-LABEL: vec_sld_test_u4i4
+subroutine vec_sld_test_u4i4(arg1, arg2)
+  vector(unsigned(4)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xi32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x i32> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x i32> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x i32>
+! CHECK: store <4 x i32> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_u4i4
+
+! CHECK-LABEL: vec_sld_test_u4i8
+subroutine vec_sld_test_u4i8(arg1, arg2)
+  vector(unsigned(4)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xi32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x i32> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x i32> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x i32>
+! CHECK: store <4 x i32> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_u4i8
+
+! CHECK-LABEL: vec_sld_test_r4i1
+subroutine vec_sld_test_r4i1(arg1, arg2)
+  vector(real(4)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xf32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xf32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xf32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xf32>) -> !fir.vector<4:f32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xf32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xf32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xf32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xf32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x float> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x float> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x float>
+! CHECK: store <4 x float> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_r4i1
+
+! CHECK-LABEL: vec_sld_test_r4i2
+subroutine vec_sld_test_r4i2(arg1, arg2)
+  vector(real(4)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xf32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xf32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xf32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xf32>) -> !fir.vector<4:f32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xf32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xf32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xf32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xf32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x float> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x float> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x float>
+! CHECK: store <4 x float> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_r4i2
+
+! CHECK-LABEL: vec_sld_test_r4i4
+subroutine vec_sld_test_r4i4(arg1, arg2)
+  vector(real(4)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xf32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xf32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xf32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xf32>) -> !fir.vector<4:f32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xf32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xf32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xf32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xf32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x float> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x float> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x float>
+! CHECK: store <4 x float> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_r4i4
+
+! CHECK-LABEL: vec_sld_test_r4i8
+subroutine vec_sld_test_r4i8(arg1, arg2)
+  vector(real(4)) :: arg1, arg2, r
+  r = vec_sld(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xf32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xf32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xf32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xf32>) -> !fir.vector<4:f32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xf32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xf32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28] : vector<16xi8>
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xf32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xf32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x float> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x float> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x float>
+! CHECK: store <4 x float> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sld_test_r4i8
+
+!----------------------
+! vec_sldw
+!----------------------
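+! The vec_sldw tests below mirror the vec_sld tests above: both operands are
+! bitcast to <16 x i8> (unless the element type is already a byte), combined
+! with a constant-index shufflevector, and bitcast back to the original
+! element type. The expected indices start at 4 for the 3-word (12-byte)
+! shift used here, versus 13 for the 3-byte shift in the vec_sld tests.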
+! CHECK-LABEL: vec_sldw_test_i1i1
+subroutine vec_sldw_test_i1i1(arg1, arg2)
+  vector(integer(1)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[carg2]], %[[carg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:i8>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[arg2]], %[[arg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: llvm.store %[[r]], %{{.*}} : !llvm.ptr<vector<16xi8>>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[arg2]], <16 x i8> %[[arg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: store <16 x i8> %[[r]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_i1i1
+
+! CHECK-LABEL: vec_sldw_test_i1i2
+subroutine vec_sldw_test_i1i2(arg1, arg2)
+  vector(integer(1)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[carg2]], %[[carg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:i8>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[arg2]], %[[arg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: llvm.store %[[r]], %{{.*}} : !llvm.ptr<vector<16xi8>>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[arg2]], <16 x i8> %[[arg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: store <16 x i8> %[[r]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_i1i2
+
+! CHECK-LABEL: vec_sldw_test_i1i4
+subroutine vec_sldw_test_i1i4(arg1, arg2)
+  vector(integer(1)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[carg2]], %[[carg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:i8>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[arg2]], %[[arg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: llvm.store %[[r]], %{{.*}} : !llvm.ptr<vector<16xi8>>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[arg2]], <16 x i8> %[[arg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: store <16 x i8> %[[r]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_i1i4
+
+! CHECK-LABEL: vec_sldw_test_i1i8
+subroutine vec_sldw_test_i1i8(arg1, arg2)
+  vector(integer(1)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:i8>) -> vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[carg2]], %[[carg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:i8>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<16:i8>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[arg2]], %[[arg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: llvm.store %[[r]], %{{.*}} : !llvm.ptr<vector<16xi8>>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[arg2]], <16 x i8> %[[arg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: store <16 x i8> %[[r]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_i1i8
+
+! CHECK-LABEL: vec_sldw_test_i2i1
+subroutine vec_sldw_test_i2i1(arg1, arg2)
+  vector(integer(2)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<8xi16>) -> !fir.vector<8:i16>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<8xi16>>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <8 x i16>
+! CHECK: store <8 x i16> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_i2i1
+
+! CHECK-LABEL: vec_sldw_test_i2i2
+subroutine vec_sldw_test_i2i2(arg1, arg2)
+  vector(integer(2)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<8xi16>) -> !fir.vector<8:i16>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<8xi16>>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <8 x i16>
+! CHECK: store <8 x i16> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_i2i2
+
+! CHECK-LABEL: vec_sldw_test_i2i4
+subroutine vec_sldw_test_i2i4(arg1, arg2)
+  vector(integer(2)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<8xi16>) -> !fir.vector<8:i16>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<8xi16>>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <8 x i16>
+! CHECK: store <8 x i16> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_i2i4
+
+! CHECK-LABEL: vec_sldw_test_i2i8
+subroutine vec_sldw_test_i2i8(arg1, arg2)
+  vector(integer(2)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:i16>) -> vector<8xi16>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<8xi16>) -> !fir.vector<8:i16>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<8:i16>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<8xi16>>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <8 x i16>
+! CHECK: store <8 x i16> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_i2i8
+
+! CHECK-LABEL: vec_sldw_test_i4i1
+subroutine vec_sldw_test_i4i1(arg1, arg2)
+  vector(integer(4)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xi32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x i32> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x i32> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x i32>
+! CHECK: store <4 x i32> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_i4i1
+
+! CHECK-LABEL: vec_sldw_test_i4i2
+subroutine vec_sldw_test_i4i2(arg1, arg2)
+  vector(integer(4)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xi32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x i32> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x i32> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x i32>
+! CHECK: store <4 x i32> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_i4i2
+
+! CHECK-LABEL: vec_sldw_test_i4i4
+subroutine vec_sldw_test_i4i4(arg1, arg2)
+  vector(integer(4)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xi32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x i32> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x i32> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x i32>
+! CHECK: store <4 x i32> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_i4i4
+
+! CHECK-LABEL: vec_sldw_test_i4i8
+subroutine vec_sldw_test_i4i8(arg1, arg2)
+  vector(integer(4)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:i32>) -> vector<4xi32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xi32>) -> !fir.vector<4:i32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:i32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xi32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x i32> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x i32> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x i32>
+! CHECK: store <4 x i32> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_i4i8
+
+! CHECK-LABEL: vec_sldw_test_i8i1
+subroutine vec_sldw_test_i8i1(arg1, arg2)
+  vector(integer(8)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:i64>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:i64>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<2:i64>) -> vector<2xi64>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<2:i64>) -> vector<2xi64>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<2xi64> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<2xi64> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xi64>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<2xi64>) -> !fir.vector<2:i64>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<2:i64>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<2xi64> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<2xi64> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xi64>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<2xi64>>
+
+! CHECK: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <2 x i64> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <2 x i64> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <2 x i64>
+! CHECK: store <2 x i64> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_i8i1
+
+! CHECK-LABEL: vec_sldw_test_i8i2
+subroutine vec_sldw_test_i8i2(arg1, arg2)
+  vector(integer(8)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:i64>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:i64>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<2:i64>) -> vector<2xi64>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<2:i64>) -> vector<2xi64>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<2xi64> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<2xi64> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xi64>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<2xi64>) -> !fir.vector<2:i64>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<2:i64>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<2xi64> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<2xi64> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xi64>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<2xi64>>
+
+! CHECK: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <2 x i64> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <2 x i64> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <2 x i64>
+! CHECK: store <2 x i64> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_i8i2
+
+! CHECK-LABEL: vec_sldw_test_i8i4
+subroutine vec_sldw_test_i8i4(arg1, arg2)
+  vector(integer(8)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:i64>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:i64>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<2:i64>) -> vector<2xi64>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<2:i64>) -> vector<2xi64>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<2xi64> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<2xi64> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xi64>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<2xi64>) -> !fir.vector<2:i64>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<2:i64>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<2xi64> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<2xi64> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xi64>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<2xi64>>
+
+! CHECK: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <2 x i64> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <2 x i64> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <2 x i64>
+! CHECK: store <2 x i64> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_i8i4
+
+! CHECK-LABEL: vec_sldw_test_i8i8
+subroutine vec_sldw_test_i8i8(arg1, arg2)
+  vector(integer(8)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:i64>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:i64>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<2:i64>) -> vector<2xi64>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<2:i64>) -> vector<2xi64>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<2xi64> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<2xi64> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xi64>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<2xi64>) -> !fir.vector<2:i64>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<2:i64>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<2xi64> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<2xi64> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xi64>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<2xi64>>
+
+! CHECK: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <2 x i64> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <2 x i64> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <2 x i64>
+! CHECK: store <2 x i64> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_i8i8
+
+! CHECK-LABEL: vec_sldw_test_u1i1
+subroutine vec_sldw_test_u1i1(arg1, arg2)
+  vector(unsigned(1)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[carg2]], %[[carg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[arg2]], %[[arg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: llvm.store %[[r]], %{{.*}} : !llvm.ptr<vector<16xi8>>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[arg2]], <16 x i8> %[[arg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: store <16 x i8> %[[r]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_u1i1
+
+! CHECK-LABEL: vec_sldw_test_u1i2
+subroutine vec_sldw_test_u1i2(arg1, arg2)
+  vector(unsigned(1)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[carg2]], %[[carg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[arg2]], %[[arg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: llvm.store %[[r]], %{{.*}} : !llvm.ptr<vector<16xi8>>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[arg2]], <16 x i8> %[[arg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: store <16 x i8> %[[r]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_u1i2
+
+! CHECK-LABEL: vec_sldw_test_u1i4
+subroutine vec_sldw_test_u1i4(arg1, arg2)
+  vector(unsigned(1)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[carg2]], %[[carg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[arg2]], %[[arg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: llvm.store %[[r]], %{{.*}} : !llvm.ptr<vector<16xi8>>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[arg2]], <16 x i8> %[[arg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: store <16 x i8> %[[r]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_u1i4
+
+! CHECK-LABEL: vec_sldw_test_u1i8
+subroutine vec_sldw_test_u1i8(arg1, arg2)
+  vector(unsigned(1)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<16:ui8>) -> vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[carg2]], %[[carg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[r]] : (vector<16xi8>) -> !fir.vector<16:ui8>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<16:ui8>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<16xi8>>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[arg2]], %[[arg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: llvm.store %[[r]], %{{.*}} : !llvm.ptr<vector<16xi8>>
+
+! CHECK: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[arg2]], <16 x i8> %[[arg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: store <16 x i8> %[[r]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_u1i8
+
+! CHECK-LABEL: vec_sldw_test_u2i1
+subroutine vec_sldw_test_u2i1(arg1, arg2)
+  vector(unsigned(2)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<8xi16>>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <8 x i16>
+! CHECK: store <8 x i16> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_u2i1
+
+! CHECK-LABEL: vec_sldw_test_u2i2
+subroutine vec_sldw_test_u2i2(arg1, arg2)
+  vector(unsigned(2)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<8xi16>>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <8 x i16>
+! CHECK: store <8 x i16> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_u2i2
+
+! CHECK-LABEL: vec_sldw_test_u2i4
+subroutine vec_sldw_test_u2i4(arg1, arg2)
+  vector(unsigned(2)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<8xi16>>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <8 x i16>
+! CHECK: store <8 x i16> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_u2i4
+
+! CHECK-LABEL: vec_sldw_test_u2i8
+subroutine vec_sldw_test_u2i8(arg1, arg2)
+  vector(unsigned(2)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<8:ui16>) -> vector<8xi16>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<8xi16>) -> !fir.vector<8:ui16>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<8:ui16>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<8xi16>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<8xi16> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<8xi16>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<8xi16>>
+
+! CHECK: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <8 x i16> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <8 x i16> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <8 x i16>
+! CHECK: store <8 x i16> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_u2i8
+
+! CHECK-LABEL: vec_sldw_test_u4i1
+subroutine vec_sldw_test_u4i1(arg1, arg2)
+  vector(unsigned(4)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xi32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x i32> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x i32> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x i32>
+! CHECK: store <4 x i32> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_u4i1
+
+! CHECK-LABEL: vec_sldw_test_u4i2
+subroutine vec_sldw_test_u4i2(arg1, arg2)
+  vector(unsigned(4)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xi32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x i32> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x i32> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x i32>
+! CHECK: store <4 x i32> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_u4i2
+
+! CHECK-LABEL: vec_sldw_test_u4i4
+subroutine vec_sldw_test_u4i4(arg1, arg2)
+  vector(unsigned(4)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xi32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x i32> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x i32> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x i32>
+! CHECK: store <4 x i32> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_u4i4
+
+! CHECK-LABEL: vec_sldw_test_u4i8
+subroutine vec_sldw_test_u4i8(arg1, arg2)
+  vector(unsigned(4)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:ui32>) -> vector<4xi32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xi32>) -> !fir.vector<4:ui32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:ui32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xi32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xi32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xi32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xi32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x i32> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x i32> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x i32>
+! CHECK: store <4 x i32> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_u4i8
+
+! CHECK-LABEL: vec_sldw_test_u8i1
+subroutine vec_sldw_test_u8i1(arg1, arg2)
+  vector(unsigned(8)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<2xi64> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<2xi64> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xi64>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<2xi64>) -> !fir.vector<2:ui64>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<2xi64> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<2xi64> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xi64>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<2xi64>>
+
+! CHECK: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <2 x i64> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <2 x i64> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <2 x i64>
+! CHECK: store <2 x i64> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_u8i1
+
+! CHECK-LABEL: vec_sldw_test_u8i2
+subroutine vec_sldw_test_u8i2(arg1, arg2)
+  vector(unsigned(8)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<2xi64> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<2xi64> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xi64>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<2xi64>) -> !fir.vector<2:ui64>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<2xi64> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<2xi64> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xi64>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<2xi64>>
+
+! CHECK: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <2 x i64> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <2 x i64> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <2 x i64>
+! CHECK: store <2 x i64> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_u8i2
+
+! CHECK-LABEL: vec_sldw_test_u8i4
+subroutine vec_sldw_test_u8i4(arg1, arg2)
+  vector(unsigned(8)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<2xi64> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<2xi64> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xi64>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<2xi64>) -> !fir.vector<2:ui64>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<2xi64> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<2xi64> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xi64>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<2xi64>>
+
+! CHECK: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <2 x i64> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <2 x i64> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <2 x i64>
+! CHECK: store <2 x i64> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_u8i4
+
+! CHECK-LABEL: vec_sldw_test_u8i8
+subroutine vec_sldw_test_u8i8(arg1, arg2)
+  vector(unsigned(8)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<2:ui64>) -> vector<2xi64>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<2xi64> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<2xi64> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xi64>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<2xi64>) -> !fir.vector<2:ui64>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<2:ui64>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xi64>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<2xi64> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<2xi64> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xi64>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<2xi64>>
+
+! CHECK: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <2 x i64> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <2 x i64> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <2 x i64>
+! CHECK: store <2 x i64> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_u8i8
+
+! CHECK-LABEL: vec_sldw_test_r4i1
+subroutine vec_sldw_test_r4i1(arg1, arg2)
+  vector(real(4)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xf32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xf32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xf32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xf32>) -> !fir.vector<4:f32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xf32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xf32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xf32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xf32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x float> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x float> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x float>
+! CHECK: store <4 x float> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_r4i1
+
+! CHECK-LABEL: vec_sldw_test_r4i2
+subroutine vec_sldw_test_r4i2(arg1, arg2)
+  vector(real(4)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xf32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xf32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xf32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xf32>) -> !fir.vector<4:f32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xf32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xf32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xf32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xf32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x float> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x float> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x float>
+! CHECK: store <4 x float> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_r4i2
+
+! CHECK-LABEL: vec_sldw_test_r4i4
+subroutine vec_sldw_test_r4i4(arg1, arg2)
+  vector(real(4)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xf32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xf32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xf32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xf32>) -> !fir.vector<4:f32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xf32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xf32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xf32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xf32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x float> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x float> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x float>
+! CHECK: store <4 x float> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_r4i4
+
+! CHECK-LABEL: vec_sldw_test_r4i8
+subroutine vec_sldw_test_r4i8(arg1, arg2)
+  vector(real(4)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<4:f32>) -> vector<4xf32>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<4xf32> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<4xf32> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xf32>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<4xf32>) -> !fir.vector<4:f32>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<4:f32>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<4xf32>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<4xf32> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<4xf32> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<4xf32>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<4xf32>>
+
+! CHECK: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <4 x float>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <4 x float> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <4 x float> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <4 x float>
+! CHECK: store <4 x float> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_r4i8
+
+! CHECK-LABEL: vec_sldw_test_r8i1
+subroutine vec_sldw_test_r8i1(arg1, arg2)
+  vector(real(8)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_1)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:f64>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:f64>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<2:f64>) -> vector<2xf64>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<2:f64>) -> vector<2xf64>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<2xf64> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<2xf64> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xf64>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<2xf64>) -> !fir.vector<2:f64>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<2:f64>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xf64>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xf64>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<2xf64> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<2xf64> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xf64>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<2xf64>>
+
+! CHECK: %[[arg1:.*]] = load <2 x double>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <2 x double>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <2 x double> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <2 x double> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <2 x double>
+! CHECK: store <2 x double> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_r8i1
+
+! CHECK-LABEL: vec_sldw_test_r8i2
+subroutine vec_sldw_test_r8i2(arg1, arg2)
+  vector(real(8)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_2)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:f64>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:f64>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<2:f64>) -> vector<2xf64>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<2:f64>) -> vector<2xf64>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<2xf64> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<2xf64> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xf64>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<2xf64>) -> !fir.vector<2:f64>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<2:f64>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xf64>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xf64>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<2xf64> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<2xf64> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xf64>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<2xf64>>
+
+! CHECK: %[[arg1:.*]] = load <2 x double>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <2 x double>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <2 x double> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <2 x double> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <2 x double>
+! CHECK: store <2 x double> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_r8i2
+
+! CHECK-LABEL: vec_sldw_test_r8i4
+subroutine vec_sldw_test_r8i4(arg1, arg2)
+  vector(real(8)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_4)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:f64>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:f64>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<2:f64>) -> vector<2xf64>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<2:f64>) -> vector<2xf64>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<2xf64> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<2xf64> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xf64>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<2xf64>) -> !fir.vector<2:f64>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<2:f64>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xf64>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xf64>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<2xf64> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<2xf64> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xf64>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<2xf64>>
+
+! CHECK: %[[arg1:.*]] = load <2 x double>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <2 x double>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <2 x double> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <2 x double> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <2 x double>
+! CHECK: store <2 x double> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_r8i4
+
+! CHECK-LABEL: vec_sldw_test_r8i8
+subroutine vec_sldw_test_r8i8(arg1, arg2)
+  vector(real(8)) :: arg1, arg2, r
+  r = vec_sldw(arg1, arg2, 3_8)
+
+! CHECK-FIR: %[[arg1:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:f64>>
+! CHECK-FIR: %[[arg2:.*]] = fir.load %{{.*}} : !fir.ref<!fir.vector<2:f64>>
+! CHECK-FIR: %[[carg1:.*]] = fir.convert %[[arg1]] : (!fir.vector<2:f64>) -> vector<2xf64>
+! CHECK-FIR: %[[carg2:.*]] = fir.convert %[[arg2]] : (!fir.vector<2:f64>) -> vector<2xf64>
+! CHECK-FIR: %[[barg1:.*]] = llvm.bitcast %[[carg1]] : vector<2xf64> to vector<16xi8>
+! CHECK-FIR: %[[barg2:.*]] = llvm.bitcast %[[carg2]] : vector<2xf64> to vector<16xi8>
+! CHECK-FIR: %[[r:.*]] = vector.shuffle %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8>, vector<16xi8>
+! CHECK-FIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xf64>
+! CHECK-FIR: %[[cr:.*]] = fir.convert %[[br]] : (vector<2xf64>) -> !fir.vector<2:f64>
+! CHECK-FIR: fir.store %[[cr]] to %{{.*}} : !fir.ref<!fir.vector<2:f64>>
+
+! CHECK-LLVMIR: %[[arg1:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xf64>>
+! CHECK-LLVMIR: %[[arg2:.*]] = llvm.load %{{.*}} : !llvm.ptr<vector<2xf64>>
+! CHECK-LLVMIR: %[[barg1:.*]] = llvm.bitcast %[[arg1]] : vector<2xf64> to vector<16xi8>
+! CHECK-LLVMIR: %[[barg2:.*]] = llvm.bitcast %[[arg2]] : vector<2xf64> to vector<16xi8>
+! CHECK-LLVMIR: %[[r:.*]] = llvm.shufflevector %[[barg2]], %[[barg1]] [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] : vector<16xi8> 
+! CHECK-LLVMIR: %[[br:.*]] = llvm.bitcast %[[r]] : vector<16xi8> to vector<2xf64>
+! CHECK-LLVMIR: llvm.store %[[br]], %{{.*}} : !llvm.ptr<vector<2xf64>>
+
+! CHECK: %[[arg1:.*]] = load <2 x double>, ptr %{{.*}}, align 16
+! CHECK: %[[arg2:.*]] = load <2 x double>, ptr %{{.*}}, align 16
+! CHECK: %[[barg1:.*]] = bitcast <2 x double> %[[arg1]] to <16 x i8>
+! CHECK: %[[barg2:.*]] = bitcast <2 x double> %[[arg2]] to <16 x i8>
+! CHECK: %[[r:.*]] = shufflevector <16 x i8> %[[barg2]], <16 x i8> %[[barg1]], <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+! CHECK: %[[br:.*]] = bitcast <16 x i8> %[[r]] to <2 x double>
+! CHECK: store <2 x double> %[[br]], ptr %{{.*}}, align 16
+end subroutine vec_sldw_test_r8i8

diff  --git a/flang/test/Semantics/PowerPC/ppc-vector-intrinsics.f90 b/flang/test/Semantics/PowerPC/ppc-vector-intrinsics.f90
new file mode 100644
index 00000000000000..7ad44d00e71f86
--- /dev/null
+++ b/flang/test/Semantics/PowerPC/ppc-vector-intrinsics.f90
@@ -0,0 +1,17 @@
+! RUN: %S/../test_errors.py %s %flang_fc1
+! REQUIRES: target=powerpc{{.*}}
+
+program test
+  vector(integer(4)) :: arg1, arg2, r
+  integer :: i
+
+!ERROR: Actual argument #3 must be a constant expression
+  r = vec_sld(arg1, arg2, i)
+!ERROR: Argument #3 must be a constant expression in range 0-15
+  r = vec_sld(arg1, arg2, 17)
+
+!ERROR: Actual argument #3 must be a constant expression
+  r = vec_sldw(arg1, arg2, i)
+!ERROR: Argument #3 must be a constant expression in range 0-3
+  r = vec_sldw(arg1, arg2, 5)
+end


        

