[llvm-branch-commits] [llvm] 1365718 - [VE] Add vsll, vsrl, vsla, vsra, and vsfa intrinsic instructions

Kazushi Marukawa via llvm-branch-commits <llvm-branch-commits at lists.llvm.org>
Thu Dec 3 06:24:59 PST 2020


Author: Kazushi (Jam) Marukawa
Date: 2020-12-03T23:19:58+09:00
New Revision: 1365718778b4ce05587afa22835282c5d3f835b7

URL: https://github.com/llvm/llvm-project/commit/1365718778b4ce05587afa22835282c5d3f835b7
DIFF: https://github.com/llvm/llvm-project/commit/1365718778b4ce05587afa22835282c5d3f835b7.diff

LOG: [VE] Add vsll, vsrl, vsla, vsra, and vsfa intrinsic instructions

Add vsll, vsrl, vsla, vsra, and vsfa intrinsic instructions and
regression tests.

Reviewed By: simoll

Differential Revision: https://reviews.llvm.org/D92550
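
For context, the following sketch (not part of this commit) illustrates how one of
the newly added intrinsics is exercised from LLVM IR. It mirrors the pattern of the
regression tests added below and uses the int_ve_vl_vsll_vvsmvl signature declared in
IntrinsicsVEVL.gen.td; the function and value names are illustrative only.

; Hypothetical example (not committed): masked vector shift left logical.
declare <256 x double> @llvm.ve.vl.vsll.vvsmvl(<256 x double>, i64, <256 x i1>, <256 x double>, i32)

define fastcc <256 x double> @example_vsll_masked(<256 x double> %vz, i64 %sy, <256 x i1> %vm, <256 x double> %pt) {
  ; Shift the active elements of %vz left by %sy, take the corresponding
  ; element of %pt for inactive elements, and operate on the first 128
  ; elements only (the trailing i32 is the vector length).
  %r = tail call fast <256 x double> @llvm.ve.vl.vsll.vvsmvl(<256 x double> %vz, i64 %sy, <256 x i1> %vm, <256 x double> %pt, i32 128)
  ret <256 x double> %r
}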

Added: 
    llvm/test/CodeGen/VE/VELIntrinsics/vsfa.ll
    llvm/test/CodeGen/VE/VELIntrinsics/vsla.ll
    llvm/test/CodeGen/VE/VELIntrinsics/vsll.ll
    llvm/test/CodeGen/VE/VELIntrinsics/vsra.ll
    llvm/test/CodeGen/VE/VELIntrinsics/vsrl.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
    llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td b/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
index 116cdc6fcc6a..e1f87122237a 100644
--- a/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
+++ b/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
@@ -415,3 +415,78 @@ let TargetPrefix = "ve" in def int_ve_vl_pvsequp_vl : GCCBuiltin<"__builtin_ve_v
 let TargetPrefix = "ve" in def int_ve_vl_pvsequp_vvl : GCCBuiltin<"__builtin_ve_vl_pvsequp_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
 let TargetPrefix = "ve" in def int_ve_vl_pvseq_vl : GCCBuiltin<"__builtin_ve_vl_pvseq_vl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>], [IntrNoMem]>;
 let TargetPrefix = "ve" in def int_ve_vl_pvseq_vvl : GCCBuiltin<"__builtin_ve_vl_pvseq_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsll_vvvl : GCCBuiltin<"__builtin_ve_vl_vsll_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsll_vvvvl : GCCBuiltin<"__builtin_ve_vl_vsll_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsll_vvsl : GCCBuiltin<"__builtin_ve_vl_vsll_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsll_vvsvl : GCCBuiltin<"__builtin_ve_vl_vsll_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsll_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vsll_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsll_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vsll_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvvl : GCCBuiltin<"__builtin_ve_vl_pvsll_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvsll_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvsl : GCCBuiltin<"__builtin_ve_vl_pvsll_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvsll_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvsll_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvsMvl : GCCBuiltin<"__builtin_ve_vl_pvsll_vvsMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvvl : GCCBuiltin<"__builtin_ve_vl_vsrl_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvvvl : GCCBuiltin<"__builtin_ve_vl_vsrl_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvsl : GCCBuiltin<"__builtin_ve_vl_vsrl_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvsvl : GCCBuiltin<"__builtin_ve_vl_vsrl_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vsrl_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vsrl_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvvl : GCCBuiltin<"__builtin_ve_vl_pvsrl_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvsrl_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvsl : GCCBuiltin<"__builtin_ve_vl_pvsrl_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvsrl_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvsrl_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvsMvl : GCCBuiltin<"__builtin_ve_vl_pvsrl_vvsMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslawsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vslawsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslawsx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vslawsx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslawsx_vvsl : GCCBuiltin<"__builtin_ve_vl_vslawsx_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslawsx_vvsvl : GCCBuiltin<"__builtin_ve_vl_vslawsx_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslawsx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vslawsx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslawsx_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vslawsx_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslawzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vslawzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslawzx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vslawzx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslawzx_vvsl : GCCBuiltin<"__builtin_ve_vl_vslawzx_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslawzx_vvsvl : GCCBuiltin<"__builtin_ve_vl_vslawzx_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslawzx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vslawzx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslawzx_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vslawzx_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvvl : GCCBuiltin<"__builtin_ve_vl_pvsla_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvsla_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvsl : GCCBuiltin<"__builtin_ve_vl_pvsla_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvsla_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvsla_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvsMvl : GCCBuiltin<"__builtin_ve_vl_pvsla_vvsMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslal_vvvl : GCCBuiltin<"__builtin_ve_vl_vslal_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslal_vvvvl : GCCBuiltin<"__builtin_ve_vl_vslal_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslal_vvsl : GCCBuiltin<"__builtin_ve_vl_vslal_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslal_vvsvl : GCCBuiltin<"__builtin_ve_vl_vslal_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslal_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vslal_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vslal_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vslal_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrawsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vsrawsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrawsx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vsrawsx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrawsx_vvsl : GCCBuiltin<"__builtin_ve_vl_vsrawsx_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrawsx_vvsvl : GCCBuiltin<"__builtin_ve_vl_vsrawsx_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrawsx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vsrawsx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrawsx_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vsrawsx_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrawzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vsrawzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrawzx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vsrawzx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrawzx_vvsl : GCCBuiltin<"__builtin_ve_vl_vsrawzx_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrawzx_vvsvl : GCCBuiltin<"__builtin_ve_vl_vsrawzx_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrawzx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vsrawzx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsrawzx_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vsrawzx_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvvl : GCCBuiltin<"__builtin_ve_vl_pvsra_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvsra_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvsl : GCCBuiltin<"__builtin_ve_vl_pvsra_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvsra_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvsra_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvsMvl : GCCBuiltin<"__builtin_ve_vl_pvsra_vvsMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsral_vvvl : GCCBuiltin<"__builtin_ve_vl_vsral_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsral_vvvvl : GCCBuiltin<"__builtin_ve_vl_vsral_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsral_vvsl : GCCBuiltin<"__builtin_ve_vl_vsral_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsral_vvsvl : GCCBuiltin<"__builtin_ve_vl_vsral_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsral_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vsral_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsral_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vsral_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsfa_vvssl : GCCBuiltin<"__builtin_ve_vl_vsfa_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsfa_vvssvl : GCCBuiltin<"__builtin_ve_vl_vsfa_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsfa_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vsfa_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;

diff --git a/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td b/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td
index a2ca074d655b..fc34a0c805d7 100644
--- a/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td
+++ b/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td
@@ -609,3 +609,105 @@ def : Pat<(int_ve_vl_pvsequp_vl i32:$vl), (PVSEQUPl i32:$vl)>;
 def : Pat<(int_ve_vl_pvsequp_vvl v256f64:$pt, i32:$vl), (PVSEQUPl_v i32:$vl, v256f64:$pt)>;
 def : Pat<(int_ve_vl_pvseq_vl i32:$vl), (PVSEQl i32:$vl)>;
 def : Pat<(int_ve_vl_pvseq_vvl v256f64:$pt, i32:$vl), (PVSEQl_v i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsll_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (VSLLvvl v256f64:$vz, v256f64:$vy, i32:$vl)>;
+def : Pat<(int_ve_vl_vsll_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$pt, i32:$vl), (VSLLvvl_v v256f64:$vz, v256f64:$vy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsll_vvsl v256f64:$vz, i64:$sy, i32:$vl), (VSLLvrl v256f64:$vz, i64:$sy, i32:$vl)>;
+def : Pat<(int_ve_vl_vsll_vvsvl v256f64:$vz, i64:$sy, v256f64:$pt, i32:$vl), (VSLLvrl_v v256f64:$vz, i64:$sy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsll_vvsl v256f64:$vz, uimm6:$N, i32:$vl), (VSLLvil v256f64:$vz, (ULO7 $N), i32:$vl)>;
+def : Pat<(int_ve_vl_vsll_vvsvl v256f64:$vz, uimm6:$N, v256f64:$pt, i32:$vl), (VSLLvil_v v256f64:$vz, (ULO7 $N), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsll_vvvmvl v256f64:$vz, v256f64:$vy, v256i1:$vm, v256f64:$pt, i32:$vl), (VSLLvvml_v v256f64:$vz, v256f64:$vy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsll_vvsmvl v256f64:$vz, i64:$sy, v256i1:$vm, v256f64:$pt, i32:$vl), (VSLLvrml_v v256f64:$vz, i64:$sy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsll_vvsmvl v256f64:$vz, uimm6:$N, v256i1:$vm, v256f64:$pt, i32:$vl), (VSLLviml_v v256f64:$vz, (ULO7 $N), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvsll_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (PVSLLvvl v256f64:$vz, v256f64:$vy, i32:$vl)>;
+def : Pat<(int_ve_vl_pvsll_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$pt, i32:$vl), (PVSLLvvl_v v256f64:$vz, v256f64:$vy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvsll_vvsl v256f64:$vz, i64:$sy, i32:$vl), (PVSLLvrl v256f64:$vz, i64:$sy, i32:$vl)>;
+def : Pat<(int_ve_vl_pvsll_vvsvl v256f64:$vz, i64:$sy, v256f64:$pt, i32:$vl), (PVSLLvrl_v v256f64:$vz, i64:$sy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvsll_vvvMvl v256f64:$vz, v256f64:$vy, v512i1:$vm, v256f64:$pt, i32:$vl), (PVSLLvvml_v v256f64:$vz, v256f64:$vy, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvsll_vvsMvl v256f64:$vz, i64:$sy, v512i1:$vm, v256f64:$pt, i32:$vl), (PVSLLvrml_v v256f64:$vz, i64:$sy, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrl_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (VSRLvvl v256f64:$vz, v256f64:$vy, i32:$vl)>;
+def : Pat<(int_ve_vl_vsrl_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$pt, i32:$vl), (VSRLvvl_v v256f64:$vz, v256f64:$vy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrl_vvsl v256f64:$vz, i64:$sy, i32:$vl), (VSRLvrl v256f64:$vz, i64:$sy, i32:$vl)>;
+def : Pat<(int_ve_vl_vsrl_vvsvl v256f64:$vz, i64:$sy, v256f64:$pt, i32:$vl), (VSRLvrl_v v256f64:$vz, i64:$sy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrl_vvsl v256f64:$vz, uimm6:$N, i32:$vl), (VSRLvil v256f64:$vz, (ULO7 $N), i32:$vl)>;
+def : Pat<(int_ve_vl_vsrl_vvsvl v256f64:$vz, uimm6:$N, v256f64:$pt, i32:$vl), (VSRLvil_v v256f64:$vz, (ULO7 $N), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrl_vvvmvl v256f64:$vz, v256f64:$vy, v256i1:$vm, v256f64:$pt, i32:$vl), (VSRLvvml_v v256f64:$vz, v256f64:$vy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrl_vvsmvl v256f64:$vz, i64:$sy, v256i1:$vm, v256f64:$pt, i32:$vl), (VSRLvrml_v v256f64:$vz, i64:$sy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrl_vvsmvl v256f64:$vz, uimm6:$N, v256i1:$vm, v256f64:$pt, i32:$vl), (VSRLviml_v v256f64:$vz, (ULO7 $N), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvsrl_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (PVSRLvvl v256f64:$vz, v256f64:$vy, i32:$vl)>;
+def : Pat<(int_ve_vl_pvsrl_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$pt, i32:$vl), (PVSRLvvl_v v256f64:$vz, v256f64:$vy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvsrl_vvsl v256f64:$vz, i64:$sy, i32:$vl), (PVSRLvrl v256f64:$vz, i64:$sy, i32:$vl)>;
+def : Pat<(int_ve_vl_pvsrl_vvsvl v256f64:$vz, i64:$sy, v256f64:$pt, i32:$vl), (PVSRLvrl_v v256f64:$vz, i64:$sy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvsrl_vvvMvl v256f64:$vz, v256f64:$vy, v512i1:$vm, v256f64:$pt, i32:$vl), (PVSRLvvml_v v256f64:$vz, v256f64:$vy, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvsrl_vvsMvl v256f64:$vz, i64:$sy, v512i1:$vm, v256f64:$pt, i32:$vl), (PVSRLvrml_v v256f64:$vz, i64:$sy, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslawsx_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (VSLAWSXvvl v256f64:$vz, v256f64:$vy, i32:$vl)>;
+def : Pat<(int_ve_vl_vslawsx_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$pt, i32:$vl), (VSLAWSXvvl_v v256f64:$vz, v256f64:$vy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslawsx_vvsl v256f64:$vz, i32:$sy, i32:$vl), (VSLAWSXvrl v256f64:$vz, i32:$sy, i32:$vl)>;
+def : Pat<(int_ve_vl_vslawsx_vvsvl v256f64:$vz, i32:$sy, v256f64:$pt, i32:$vl), (VSLAWSXvrl_v v256f64:$vz, i32:$sy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslawsx_vvsl v256f64:$vz, uimm6:$N, i32:$vl), (VSLAWSXvil v256f64:$vz, (ULO7 $N), i32:$vl)>;
+def : Pat<(int_ve_vl_vslawsx_vvsvl v256f64:$vz, uimm6:$N, v256f64:$pt, i32:$vl), (VSLAWSXvil_v v256f64:$vz, (ULO7 $N), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslawsx_vvvmvl v256f64:$vz, v256f64:$vy, v256i1:$vm, v256f64:$pt, i32:$vl), (VSLAWSXvvml_v v256f64:$vz, v256f64:$vy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslawsx_vvsmvl v256f64:$vz, i32:$sy, v256i1:$vm, v256f64:$pt, i32:$vl), (VSLAWSXvrml_v v256f64:$vz, i32:$sy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslawsx_vvsmvl v256f64:$vz, uimm6:$N, v256i1:$vm, v256f64:$pt, i32:$vl), (VSLAWSXviml_v v256f64:$vz, (ULO7 $N), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslawzx_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (VSLAWZXvvl v256f64:$vz, v256f64:$vy, i32:$vl)>;
+def : Pat<(int_ve_vl_vslawzx_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$pt, i32:$vl), (VSLAWZXvvl_v v256f64:$vz, v256f64:$vy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslawzx_vvsl v256f64:$vz, i32:$sy, i32:$vl), (VSLAWZXvrl v256f64:$vz, i32:$sy, i32:$vl)>;
+def : Pat<(int_ve_vl_vslawzx_vvsvl v256f64:$vz, i32:$sy, v256f64:$pt, i32:$vl), (VSLAWZXvrl_v v256f64:$vz, i32:$sy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslawzx_vvsl v256f64:$vz, uimm6:$N, i32:$vl), (VSLAWZXvil v256f64:$vz, (ULO7 $N), i32:$vl)>;
+def : Pat<(int_ve_vl_vslawzx_vvsvl v256f64:$vz, uimm6:$N, v256f64:$pt, i32:$vl), (VSLAWZXvil_v v256f64:$vz, (ULO7 $N), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslawzx_vvvmvl v256f64:$vz, v256f64:$vy, v256i1:$vm, v256f64:$pt, i32:$vl), (VSLAWZXvvml_v v256f64:$vz, v256f64:$vy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslawzx_vvsmvl v256f64:$vz, i32:$sy, v256i1:$vm, v256f64:$pt, i32:$vl), (VSLAWZXvrml_v v256f64:$vz, i32:$sy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslawzx_vvsmvl v256f64:$vz, uimm6:$N, v256i1:$vm, v256f64:$pt, i32:$vl), (VSLAWZXviml_v v256f64:$vz, (ULO7 $N), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvsla_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (PVSLAvvl v256f64:$vz, v256f64:$vy, i32:$vl)>;
+def : Pat<(int_ve_vl_pvsla_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$pt, i32:$vl), (PVSLAvvl_v v256f64:$vz, v256f64:$vy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvsla_vvsl v256f64:$vz, i64:$sy, i32:$vl), (PVSLAvrl v256f64:$vz, i64:$sy, i32:$vl)>;
+def : Pat<(int_ve_vl_pvsla_vvsvl v256f64:$vz, i64:$sy, v256f64:$pt, i32:$vl), (PVSLAvrl_v v256f64:$vz, i64:$sy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvsla_vvvMvl v256f64:$vz, v256f64:$vy, v512i1:$vm, v256f64:$pt, i32:$vl), (PVSLAvvml_v v256f64:$vz, v256f64:$vy, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvsla_vvsMvl v256f64:$vz, i64:$sy, v512i1:$vm, v256f64:$pt, i32:$vl), (PVSLAvrml_v v256f64:$vz, i64:$sy, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslal_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (VSLALvvl v256f64:$vz, v256f64:$vy, i32:$vl)>;
+def : Pat<(int_ve_vl_vslal_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$pt, i32:$vl), (VSLALvvl_v v256f64:$vz, v256f64:$vy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslal_vvsl v256f64:$vz, i64:$sy, i32:$vl), (VSLALvrl v256f64:$vz, i64:$sy, i32:$vl)>;
+def : Pat<(int_ve_vl_vslal_vvsvl v256f64:$vz, i64:$sy, v256f64:$pt, i32:$vl), (VSLALvrl_v v256f64:$vz, i64:$sy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslal_vvsl v256f64:$vz, uimm6:$N, i32:$vl), (VSLALvil v256f64:$vz, (ULO7 $N), i32:$vl)>;
+def : Pat<(int_ve_vl_vslal_vvsvl v256f64:$vz, uimm6:$N, v256f64:$pt, i32:$vl), (VSLALvil_v v256f64:$vz, (ULO7 $N), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslal_vvvmvl v256f64:$vz, v256f64:$vy, v256i1:$vm, v256f64:$pt, i32:$vl), (VSLALvvml_v v256f64:$vz, v256f64:$vy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslal_vvsmvl v256f64:$vz, i64:$sy, v256i1:$vm, v256f64:$pt, i32:$vl), (VSLALvrml_v v256f64:$vz, i64:$sy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vslal_vvsmvl v256f64:$vz, uimm6:$N, v256i1:$vm, v256f64:$pt, i32:$vl), (VSLALviml_v v256f64:$vz, (ULO7 $N), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrawsx_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (VSRAWSXvvl v256f64:$vz, v256f64:$vy, i32:$vl)>;
+def : Pat<(int_ve_vl_vsrawsx_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$pt, i32:$vl), (VSRAWSXvvl_v v256f64:$vz, v256f64:$vy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrawsx_vvsl v256f64:$vz, i32:$sy, i32:$vl), (VSRAWSXvrl v256f64:$vz, i32:$sy, i32:$vl)>;
+def : Pat<(int_ve_vl_vsrawsx_vvsvl v256f64:$vz, i32:$sy, v256f64:$pt, i32:$vl), (VSRAWSXvrl_v v256f64:$vz, i32:$sy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrawsx_vvsl v256f64:$vz, uimm6:$N, i32:$vl), (VSRAWSXvil v256f64:$vz, (ULO7 $N), i32:$vl)>;
+def : Pat<(int_ve_vl_vsrawsx_vvsvl v256f64:$vz, uimm6:$N, v256f64:$pt, i32:$vl), (VSRAWSXvil_v v256f64:$vz, (ULO7 $N), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrawsx_vvvmvl v256f64:$vz, v256f64:$vy, v256i1:$vm, v256f64:$pt, i32:$vl), (VSRAWSXvvml_v v256f64:$vz, v256f64:$vy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrawsx_vvsmvl v256f64:$vz, i32:$sy, v256i1:$vm, v256f64:$pt, i32:$vl), (VSRAWSXvrml_v v256f64:$vz, i32:$sy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrawsx_vvsmvl v256f64:$vz, uimm6:$N, v256i1:$vm, v256f64:$pt, i32:$vl), (VSRAWSXviml_v v256f64:$vz, (ULO7 $N), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrawzx_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (VSRAWZXvvl v256f64:$vz, v256f64:$vy, i32:$vl)>;
+def : Pat<(int_ve_vl_vsrawzx_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$pt, i32:$vl), (VSRAWZXvvl_v v256f64:$vz, v256f64:$vy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrawzx_vvsl v256f64:$vz, i32:$sy, i32:$vl), (VSRAWZXvrl v256f64:$vz, i32:$sy, i32:$vl)>;
+def : Pat<(int_ve_vl_vsrawzx_vvsvl v256f64:$vz, i32:$sy, v256f64:$pt, i32:$vl), (VSRAWZXvrl_v v256f64:$vz, i32:$sy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrawzx_vvsl v256f64:$vz, uimm6:$N, i32:$vl), (VSRAWZXvil v256f64:$vz, (ULO7 $N), i32:$vl)>;
+def : Pat<(int_ve_vl_vsrawzx_vvsvl v256f64:$vz, uimm6:$N, v256f64:$pt, i32:$vl), (VSRAWZXvil_v v256f64:$vz, (ULO7 $N), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrawzx_vvvmvl v256f64:$vz, v256f64:$vy, v256i1:$vm, v256f64:$pt, i32:$vl), (VSRAWZXvvml_v v256f64:$vz, v256f64:$vy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrawzx_vvsmvl v256f64:$vz, i32:$sy, v256i1:$vm, v256f64:$pt, i32:$vl), (VSRAWZXvrml_v v256f64:$vz, i32:$sy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsrawzx_vvsmvl v256f64:$vz, uimm6:$N, v256i1:$vm, v256f64:$pt, i32:$vl), (VSRAWZXviml_v v256f64:$vz, (ULO7 $N), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvsra_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (PVSRAvvl v256f64:$vz, v256f64:$vy, i32:$vl)>;
+def : Pat<(int_ve_vl_pvsra_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$pt, i32:$vl), (PVSRAvvl_v v256f64:$vz, v256f64:$vy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvsra_vvsl v256f64:$vz, i64:$sy, i32:$vl), (PVSRAvrl v256f64:$vz, i64:$sy, i32:$vl)>;
+def : Pat<(int_ve_vl_pvsra_vvsvl v256f64:$vz, i64:$sy, v256f64:$pt, i32:$vl), (PVSRAvrl_v v256f64:$vz, i64:$sy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvsra_vvvMvl v256f64:$vz, v256f64:$vy, v512i1:$vm, v256f64:$pt, i32:$vl), (PVSRAvvml_v v256f64:$vz, v256f64:$vy, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvsra_vvsMvl v256f64:$vz, i64:$sy, v512i1:$vm, v256f64:$pt, i32:$vl), (PVSRAvrml_v v256f64:$vz, i64:$sy, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsral_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (VSRALvvl v256f64:$vz, v256f64:$vy, i32:$vl)>;
+def : Pat<(int_ve_vl_vsral_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$pt, i32:$vl), (VSRALvvl_v v256f64:$vz, v256f64:$vy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsral_vvsl v256f64:$vz, i64:$sy, i32:$vl), (VSRALvrl v256f64:$vz, i64:$sy, i32:$vl)>;
+def : Pat<(int_ve_vl_vsral_vvsvl v256f64:$vz, i64:$sy, v256f64:$pt, i32:$vl), (VSRALvrl_v v256f64:$vz, i64:$sy, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsral_vvsl v256f64:$vz, uimm6:$N, i32:$vl), (VSRALvil v256f64:$vz, (ULO7 $N), i32:$vl)>;
+def : Pat<(int_ve_vl_vsral_vvsvl v256f64:$vz, uimm6:$N, v256f64:$pt, i32:$vl), (VSRALvil_v v256f64:$vz, (ULO7 $N), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsral_vvvmvl v256f64:$vz, v256f64:$vy, v256i1:$vm, v256f64:$pt, i32:$vl), (VSRALvvml_v v256f64:$vz, v256f64:$vy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsral_vvsmvl v256f64:$vz, i64:$sy, v256i1:$vm, v256f64:$pt, i32:$vl), (VSRALvrml_v v256f64:$vz, i64:$sy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsral_vvsmvl v256f64:$vz, uimm6:$N, v256i1:$vm, v256f64:$pt, i32:$vl), (VSRALviml_v v256f64:$vz, (ULO7 $N), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsfa_vvssl v256f64:$vz, i64:$sy, i64:$sz, i32:$vl), (VSFAvrrl v256f64:$vz, i64:$sy, i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vsfa_vvssvl v256f64:$vz, i64:$sy, i64:$sz, v256f64:$pt, i32:$vl), (VSFAvrrl_v v256f64:$vz, i64:$sy, i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsfa_vvssl v256f64:$vz, simm7:$I, i64:$sz, i32:$vl), (VSFAvirl v256f64:$vz, (LO7 $I), i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vsfa_vvssvl v256f64:$vz, simm7:$I, i64:$sz, v256f64:$pt, i32:$vl), (VSFAvirl_v v256f64:$vz, (LO7 $I), i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsfa_vvssmvl v256f64:$vz, i64:$sy, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VSFAvrrml_v v256f64:$vz, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsfa_vvssmvl v256f64:$vz, simm7:$I, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VSFAvirml_v v256f64:$vz, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vsfa.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vsfa.ll
new file mode 100644
index 000000000000..38be0fbc9d80
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vsfa.ll
@@ -0,0 +1,100 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test vector shift left and add intrinsic instructions
+;;;
+;;; Note:
+;;;   We test VSFA*vrrl, VSFA*vrrl_v, VSFA*virl, VSFA*virl_v, VSFA*vrrml_v, and
+;;;   VSFA*virml_v instructions.
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsfa_vvssl(<256 x double> %0, i64 %1, i64 %2) {
+; CHECK-LABEL: vsfa_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vsfa %v0, %v0, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssl(<256 x double> %0, i64 %1, i64 %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsfa.vvssl(<256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsfa_vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3) {
+; CHECK-LABEL: vsfa_vvssvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vsfa %v1, %v0, %s0, %s1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsfa.vvssvl(<256 x double>, i64, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsfa_vvssl_imm(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vsfa_vvssl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsfa %v0, %v0, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssl(<256 x double> %0, i64 8, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsfa_vvssvl_imm(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vsfa_vvssvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsfa %v1, %v0, 8, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssvl(<256 x double> %0, i64 8, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsfa_vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4) {
+; CHECK-LABEL: vsfa_vvssmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vsfa %v1, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %6 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4, i32 128)
+  ret <256 x double> %6
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsfa.vvssmvl(<256 x double>, i64, i64, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsfa_vvssmvl_imm(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vsfa_vvssmvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsfa %v1, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssmvl(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vsla.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vsla.ll
new file mode 100644
index 000000000000..37e6325ef67a
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vsla.ll
@@ -0,0 +1,548 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test vector shift left arithmetic intrinsic instructions
+;;;
+;;; Note:
+;;;   We test VSLA*vvl, VSLA*vvl_v, VSLA*vrl, VSLA*vrl_v, VSLA*vil, VSLA*vil_v,
+;;;   VSLA*vvml_v, VSLA*vrml_v, VSLA*viml_v, PVSLA*vvl, PVSLA*vvl_v, PVSLA*vrl,
+;;;   PVSLA*vrl_v, PVSLA*vvml_v, and PVSLA*vrml_v instructions.
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawsx_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vslawsx_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.w.sx %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslawsx.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawsx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vslawsx_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.w.sx %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslawsx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawsx_vvsl(<256 x double> %0, i32 signext %1) {
+; CHECK-LABEL: vslawsx_vvsl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsla.w.sx %v0, %v0, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvsl(<256 x double> %0, i32 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslawsx.vvsl(<256 x double>, i32, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawsx_vvsvl(<256 x double> %0, i32 signext %1, <256 x double> %2) {
+; CHECK-LABEL: vslawsx_vvsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsla.w.sx %v1, %v0, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvsvl(<256 x double> %0, i32 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslawsx.vvsvl(<256 x double>, i32, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawsx_vvsl_imm(<256 x double> %0) {
+; CHECK-LABEL: vslawsx_vvsl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.w.sx %v0, %v0, 8
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvsl(<256 x double> %0, i32 8, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawsx_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vslawsx_vvsvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.w.sx %v1, %v0, 8
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvsvl(<256 x double> %0, i32 8, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawsx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vslawsx_vvvmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.w.sx %v2, %v0, %v1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslawsx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawsx_vvsmvl(<256 x double> %0, i32 signext %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vslawsx_vvsmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsla.w.sx %v1, %v0, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvsmvl(<256 x double> %0, i32 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslawsx.vvsmvl(<256 x double>, i32, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawsx_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vslawsx_vvsmvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.w.sx %v1, %v0, 8, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvsmvl(<256 x double> %0, i32 8, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawzx_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vslawzx_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.w.zx %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslawzx.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawzx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vslawzx_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.w.zx %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslawzx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawzx_vvsl(<256 x double> %0, i32 signext %1) {
+; CHECK-LABEL: vslawzx_vvsl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsla.w.zx %v0, %v0, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvsl(<256 x double> %0, i32 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslawzx.vvsl(<256 x double>, i32, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawzx_vvsvl(<256 x double> %0, i32 signext %1, <256 x double> %2) {
+; CHECK-LABEL: vslawzx_vvsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsla.w.zx %v1, %v0, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvsvl(<256 x double> %0, i32 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslawzx.vvsvl(<256 x double>, i32, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawzx_vvsl_imm(<256 x double> %0) {
+; CHECK-LABEL: vslawzx_vvsl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.w.zx %v0, %v0, 8
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvsl(<256 x double> %0, i32 8, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawzx_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vslawzx_vvsvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.w.zx %v1, %v0, 8
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvsvl(<256 x double> %0, i32 8, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawzx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vslawzx_vvvmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.w.zx %v2, %v0, %v1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslawzx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawzx_vvsmvl(<256 x double> %0, i32 signext %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vslawzx_vvsmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsla.w.zx %v1, %v0, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvsmvl(<256 x double> %0, i32 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslawzx.vvsmvl(<256 x double>, i32, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslawzx_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vslawzx_vvsmvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.w.zx %v1, %v0, 8, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvsmvl(<256 x double> %0, i32 8, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslal_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vslal_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.l %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslal.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslal_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vslal_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.l %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslal.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslal_vvsl(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vslal_vvsl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsla.l %v0, %v0, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvsl(<256 x double> %0, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslal.vvsl(<256 x double>, i64, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslal_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vslal_vvsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsla.l %v1, %v0, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslal.vvsvl(<256 x double>, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslal_vvsl_imm(<256 x double> %0) {
+; CHECK-LABEL: vslal_vvsl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.l %v0, %v0, 8
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvsl(<256 x double> %0, i64 8, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslal_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vslal_vvsvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.l %v1, %v0, 8
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvsvl(<256 x double> %0, i64 8, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslal_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vslal_vvvmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.l %v2, %v0, %v1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslal.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslal_vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vslal_vvsmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsla.l %v1, %v0, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vslal.vvsmvl(<256 x double>, i64, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vslal_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vslal_vvsmvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsla.l %v1, %v0, 8, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvsmvl(<256 x double> %0, i64 8, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsla_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: pvsla_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvsla %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.pvsla.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsla.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsla_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: pvsla_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvsla %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.pvsla.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsla.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsla_vvsl(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: pvsla_vvsl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvsla %v0, %v0, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.pvsla.vvsl(<256 x double> %0, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsla.vvsl(<256 x double>, i64, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsla_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: pvsla_vvsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvsla %v1, %v0, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.pvsla.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsla.vvsvl(<256 x double>, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsla_vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: pvsla_vvvMvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvsla %v2, %v0, %v1, %vm2
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.pvsla.vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsla.vvvMvl(<256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsla_vvsMvl(<256 x double> %0, i64 %1, <512 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: pvsla_vvsMvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvsla %v1, %v0, %s0, %vm2
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.pvsla.vvsMvl(<256 x double> %0, i64 %1, <512 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsla.vvsMvl(<256 x double>, i64, <512 x i1>, <256 x double>, i32)

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vsll.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vsll.ll
new file mode 100644
index 000000000000..5f7f64e237db
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vsll.ll
@@ -0,0 +1,254 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test vector shift left logical intrinsic instructions
+;;;
+;;; Note:
+;;;   We test VSLL*vvl, VSLL*vvl_v, VSLL*vrl, VSLL*vrl_v, VSLL*vil, VSLL*vil_v,
+;;;   VSLL*vvml_v, VSLL*vrml_v, VSLL*viml_v, PVSLL*vvl, PVSLL*vvl_v, PVSLL*vrl,
+;;;   PVSLL*vrl_v, PVSLL*vvml_v, and PVSLL*vrml_v instructions.
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsll_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vsll_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsll %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vsll.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsll.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsll_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vsll_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsll %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsll.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsll.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsll_vvsl(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vsll_vvsl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsll %v0, %v0, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vsll.vvsl(<256 x double> %0, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsll.vvsl(<256 x double>, i64, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsll_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vsll_vvsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsll %v1, %v0, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsll.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsll.vvsvl(<256 x double>, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsll_vvsl_imm(<256 x double> %0) {
+; CHECK-LABEL: vsll_vvsl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsll %v0, %v0, 8
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vsll.vvsl(<256 x double> %0, i64 8, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsll_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vsll_vvsvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsll %v1, %v0, 8
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vsll.vvsvl(<256 x double> %0, i64 8, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsll_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vsll_vvvmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsll %v2, %v0, %v1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vsll.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsll.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsll_vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vsll_vvsmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsll %v1, %v0, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vsll.vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsll.vvsmvl(<256 x double>, i64, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsll_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vsll_vvsmvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsll %v1, %v0, 8, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsll.vvsmvl(<256 x double> %0, i64 8, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsll_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: pvsll_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvsll %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.pvsll.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsll.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsll_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: pvsll_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvsll %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.pvsll.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsll.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsll_vvsl(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: pvsll_vvsl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvsll %v0, %v0, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.pvsll.vvsl(<256 x double> %0, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsll.vvsl(<256 x double>, i64, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsll_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: pvsll_vvsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvsll %v1, %v0, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.pvsll.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsll.vvsvl(<256 x double>, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsll_vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: pvsll_vvvMvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvsll %v2, %v0, %v1, %vm2
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.pvsll.vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsll.vvvMvl(<256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsll_vvsMvl(<256 x double> %0, i64 %1, <512 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: pvsll_vvsMvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvsll %v1, %v0, %s0, %vm2
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.pvsll.vvsMvl(<256 x double> %0, i64 %1, <512 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsll.vvsMvl(<256 x double>, i64, <512 x i1>, <256 x double>, i32)

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vsra.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vsra.ll
new file mode 100644
index 000000000000..db023f7b5485
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vsra.ll
@@ -0,0 +1,548 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test vector shift right arithmetic intrinsic instructions
+;;;
+;;; Note:
+;;;   We test VSRA*vvl, VSRA*vvl_v, VSRA*vrl, VSRA*vrl_v, VSRA*vil, VSRA*vil_v,
+;;;   VSRA*vvml_v, VSRA*vrml_v, VSRA*viml_v, PVSRA*vvl, PVSRA*vvl_v, PVSRA*vrl,
+;;;   PVSRA*vrl_v, PVSRA*vvml_v, and PVSRA*vrml_v instructions.
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawsx_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vsrawsx_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.w.sx %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrawsx.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawsx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vsrawsx_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.w.sx %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrawsx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawsx_vvsl(<256 x double> %0, i32 signext %1) {
+; CHECK-LABEL: vsrawsx_vvsl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsra.w.sx %v0, %v0, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvsl(<256 x double> %0, i32 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrawsx.vvsl(<256 x double>, i32, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawsx_vvsvl(<256 x double> %0, i32 signext %1, <256 x double> %2) {
+; CHECK-LABEL: vsrawsx_vvsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsra.w.sx %v1, %v0, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvsvl(<256 x double> %0, i32 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrawsx.vvsvl(<256 x double>, i32, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawsx_vvsl_imm(<256 x double> %0) {
+; CHECK-LABEL: vsrawsx_vvsl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.w.sx %v0, %v0, 8
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvsl(<256 x double> %0, i32 8, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawsx_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vsrawsx_vvsvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.w.sx %v1, %v0, 8
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvsvl(<256 x double> %0, i32 8, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawsx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vsrawsx_vvvmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.w.sx %v2, %v0, %v1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrawsx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawsx_vvsmvl(<256 x double> %0, i32 signext %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vsrawsx_vvsmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsra.w.sx %v1, %v0, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvsmvl(<256 x double> %0, i32 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrawsx.vvsmvl(<256 x double>, i32, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawsx_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vsrawsx_vvsmvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.w.sx %v1, %v0, 8, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvsmvl(<256 x double> %0, i32 8, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawzx_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vsrawzx_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.w.zx %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrawzx.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawzx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vsrawzx_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.w.zx %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrawzx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawzx_vvsl(<256 x double> %0, i32 signext %1) {
+; CHECK-LABEL: vsrawzx_vvsl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsra.w.zx %v0, %v0, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvsl(<256 x double> %0, i32 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrawzx.vvsl(<256 x double>, i32, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawzx_vvsvl(<256 x double> %0, i32 signext %1, <256 x double> %2) {
+; CHECK-LABEL: vsrawzx_vvsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsra.w.zx %v1, %v0, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvsvl(<256 x double> %0, i32 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrawzx.vvsvl(<256 x double>, i32, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawzx_vvsl_imm(<256 x double> %0) {
+; CHECK-LABEL: vsrawzx_vvsl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.w.zx %v0, %v0, 8
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvsl(<256 x double> %0, i32 8, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawzx_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vsrawzx_vvsvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.w.zx %v1, %v0, 8
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvsvl(<256 x double> %0, i32 8, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawzx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vsrawzx_vvvmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.w.zx %v2, %v0, %v1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrawzx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawzx_vvsmvl(<256 x double> %0, i32 signext %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vsrawzx_vvsmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsra.w.zx %v1, %v0, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvsmvl(<256 x double> %0, i32 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrawzx.vvsmvl(<256 x double>, i32, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrawzx_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vsrawzx_vvsmvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.w.zx %v1, %v0, 8, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvsmvl(<256 x double> %0, i32 8, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsral_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vsral_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.l %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsral.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsral_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vsral_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.l %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsral.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsral_vvsl(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vsral_vvsl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsra.l %v0, %v0, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvsl(<256 x double> %0, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsral.vvsl(<256 x double>, i64, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsral_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vsral_vvsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsra.l %v1, %v0, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsral.vvsvl(<256 x double>, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsral_vvsl_imm(<256 x double> %0) {
+; CHECK-LABEL: vsral_vvsl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.l %v0, %v0, 8
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvsl(<256 x double> %0, i64 8, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsral_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vsral_vvsvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.l %v1, %v0, 8
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvsvl(<256 x double> %0, i64 8, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsral_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vsral_vvvmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.l %v2, %v0, %v1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsral.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsral_vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vsral_vvsmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsra.l %v1, %v0, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsral.vvsmvl(<256 x double>, i64, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsral_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vsral_vvsmvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsra.l %v1, %v0, 8, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvsmvl(<256 x double> %0, i64 8, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsra_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: pvsra_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvsra %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.pvsra.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsra.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsra_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: pvsra_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvsra %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.pvsra.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsra.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsra_vvsl(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: pvsra_vvsl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvsra %v0, %v0, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.pvsra.vvsl(<256 x double> %0, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsra.vvsl(<256 x double>, i64, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsra_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: pvsra_vvsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvsra %v1, %v0, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.pvsra.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsra.vvsvl(<256 x double>, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsra_vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: pvsra_vvvMvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvsra %v2, %v0, %v1, %vm2
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.pvsra.vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsra.vvvMvl(<256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsra_vvsMvl(<256 x double> %0, i64 %1, <512 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: pvsra_vvsMvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvsra %v1, %v0, %s0, %vm2
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.pvsra.vvsMvl(<256 x double> %0, i64 %1, <512 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsra.vvsMvl(<256 x double>, i64, <512 x i1>, <256 x double>, i32)

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vsrl.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vsrl.ll
new file mode 100644
index 000000000000..a9a1d9d92f0e
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vsrl.ll
@@ -0,0 +1,254 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test vector shift right logical intrinsic instructions
+;;;
+;;; Note:
+;;;   We test VSRL*vvl, VSRL*vvl_v, VSRL*vrl, VSRL*vrl_v, VSRL*vil, VSRL*vil_v,
+;;;   VSRL*vvml_v, VSRL*vrml_v, VSRL*viml_v, PVSRL*vvl, PVSRL*vvl_v, PVSRL*vrl,
+;;;   PVSRL*vrl_v, PVSRL*vvml_v, and PVSRL*vrml_v instructions.
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrl_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vsrl_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsrl %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrl.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrl_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vsrl_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsrl %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrl.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrl_vvsl(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vsrl_vvsl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsrl %v0, %v0, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvsl(<256 x double> %0, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrl.vvsl(<256 x double>, i64, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrl_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vsrl_vvsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsrl %v1, %v0, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrl.vvsvl(<256 x double>, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrl_vvsl_imm(<256 x double> %0) {
+; CHECK-LABEL: vsrl_vvsl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsrl %v0, %v0, 8
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvsl(<256 x double> %0, i64 8, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrl_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vsrl_vvsvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsrl %v1, %v0, 8
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvsvl(<256 x double> %0, i64 8, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrl_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vsrl_vvvmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsrl %v2, %v0, %v1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrl.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrl_vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vsrl_vvsmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsrl %v1, %v0, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vsrl.vvsmvl(<256 x double>, i64, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vsrl_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vsrl_vvsmvl_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsrl %v1, %v0, 8, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrl.vvsmvl(<256 x double> %0, i64 8, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsrl_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: pvsrl_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvsrl %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.pvsrl.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsrl.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsrl_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: pvsrl_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvsrl %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.pvsrl.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsrl.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsrl_vvsl(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: pvsrl_vvsl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvsrl %v0, %v0, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.pvsrl.vvsl(<256 x double> %0, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsrl.vvsl(<256 x double>, i64, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsrl_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: pvsrl_vvsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvsrl %v1, %v0, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.pvsrl.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsrl.vvsvl(<256 x double>, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsrl_vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: pvsrl_vvvMvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvsrl %v2, %v0, %v1, %vm2
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.pvsrl.vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsrl.vvvMvl(<256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvsrl_vvsMvl(<256 x double> %0, i64 %1, <512 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: pvsrl_vvsMvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvsrl %v1, %v0, %s0, %vm2
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.pvsrl.vvsMvl(<256 x double> %0, i64 %1, <512 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvsrl.vvsMvl(<256 x double>, i64, <512 x i1>, <256 x double>, i32)

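(An illustrative aside, not part of the commit diff above.) All of the masked variants tested here share one operand pattern: the source vector, a shift amount (vector, scalar register, or small immediate), a mask, a pass-through vector, and the active vector length. Judging from the generated code, the shift result is written into the pass-through register under the mask, so lanes outside the mask keep their pass-through values. A minimal LLVM IR sketch calling the scalar-shift masked form of vsll with an active length of 128, mirroring the vsll_vvsmvl test (the function name is made up for illustration):

; Shift the low 128 elements of %src left by %amt where %mask is set;
; the remaining lanes are taken from the pass-through operand %pt.
define <256 x double> @vsll_masked_example(<256 x double> %src, i64 %amt,
                                           <256 x i1> %mask, <256 x double> %pt) {
  %r = call <256 x double> @llvm.ve.vl.vsll.vvsmvl(<256 x double> %src, i64 %amt,
                                                   <256 x i1> %mask, <256 x double> %pt, i32 128)
  ret <256 x double> %r
}

declare <256 x double> @llvm.ve.vl.vsll.vvsmvl(<256 x double>, i64, <256 x i1>, <256 x double>, i32)

Compiled with the same RUN line as the tests (llc -mtriple=ve -mattr=+vpu), this should select a masked vsll much like the vsll_vvsmvl case above.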


