[llvm] 87f308a - [VE] Add vgt and vsc intrinsic instructions

Kazushi Marukawa via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 11 01:23:51 PST 2020


Author: Kazushi (Jam) Marukawa
Date: 2020-12-11T18:23:43+09:00
New Revision: 87f308ab3dcf493e19abd41ee06ba9b62d6c851c

URL: https://github.com/llvm/llvm-project/commit/87f308ab3dcf493e19abd41ee06ba9b62d6c851c
DIFF: https://github.com/llvm/llvm-project/commit/87f308ab3dcf493e19abd41ee06ba9b62d6c851c.diff

LOG: [VE] Add vgt and vsc intrinsic instructions

Add vector gather (vgt) and vector scatter (vsc) intrinsic instructions along with regression tests.
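
For reference, a minimal sketch of how the unmasked forms are invoked from
LLVM IR (names such as %addrs, %vals, %sy, and %sz are hypothetical; the
types come from the intrinsic declarations in the new tests, and each
element's effective address is assumed to be the address-vector element
plus the two scalar operands, as in the VE ISA):

  declare <256 x double> @llvm.ve.vl.vgt.vvssl(<256 x double>, i64, i64, i32)
  declare void @llvm.ve.vl.vsc.vvssl(<256 x double>, <256 x double>, i64, i64, i32)

  ; gather 256 elements through the addresses in %addrs
  %g = tail call <256 x double> @llvm.ve.vl.vgt.vvssl(<256 x double> %addrs, i64 %sy, i64 %sz, i32 256)
  ; scatter the values in %vals through the same addresses
  tail call void @llvm.ve.vl.vsc.vvssl(<256 x double> %vals, <256 x double> %addrs, i64 %sy, i64 %sz, i32 256)

The masked (vvssml/vvssmvl) variants additionally take a <256 x i1> mask,
and the vvssvl/vvssmvl gather variants take a pass-through vector operand,
matching the definitions and patterns below.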

Reviewed By: simoll

Differential Revision: https://reviews.llvm.org/D93032

Added: 
    llvm/test/CodeGen/VE/VELIntrinsics/vgt.ll
    llvm/test/CodeGen/VE/VELIntrinsics/vsc.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
    llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td b/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
index 1db7003f0ffd..c22fecafb39d 100644
--- a/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
+++ b/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
@@ -1140,3 +1140,59 @@ let TargetPrefix = "ve" in def int_ve_vl_vror_vvl : GCCBuiltin<"__builtin_ve_vl_
 let TargetPrefix = "ve" in def int_ve_vl_vror_vvml : GCCBuiltin<"__builtin_ve_vl_vror_vvml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
 let TargetPrefix = "ve" in def int_ve_vl_vrxor_vvl : GCCBuiltin<"__builtin_ve_vl_vrxor_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
 let TargetPrefix = "ve" in def int_ve_vl_vrxor_vvml : GCCBuiltin<"__builtin_ve_vl_vrxor_vvml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgt_vvssl : GCCBuiltin<"__builtin_ve_vl_vgt_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgt_vvssvl : GCCBuiltin<"__builtin_ve_vl_vgt_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgt_vvssml : GCCBuiltin<"__builtin_ve_vl_vgt_vvssml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgt_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vgt_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtnc_vvssl : GCCBuiltin<"__builtin_ve_vl_vgtnc_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtnc_vvssvl : GCCBuiltin<"__builtin_ve_vl_vgtnc_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtnc_vvssml : GCCBuiltin<"__builtin_ve_vl_vgtnc_vvssml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtnc_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vgtnc_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtu_vvssl : GCCBuiltin<"__builtin_ve_vl_vgtu_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtu_vvssvl : GCCBuiltin<"__builtin_ve_vl_vgtu_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtu_vvssml : GCCBuiltin<"__builtin_ve_vl_vgtu_vvssml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtu_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vgtu_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtunc_vvssl : GCCBuiltin<"__builtin_ve_vl_vgtunc_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtunc_vvssvl : GCCBuiltin<"__builtin_ve_vl_vgtunc_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtunc_vvssml : GCCBuiltin<"__builtin_ve_vl_vgtunc_vvssml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtunc_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vgtunc_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtlsx_vvssl : GCCBuiltin<"__builtin_ve_vl_vgtlsx_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtlsx_vvssvl : GCCBuiltin<"__builtin_ve_vl_vgtlsx_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtlsx_vvssml : GCCBuiltin<"__builtin_ve_vl_vgtlsx_vvssml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtlsx_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vgtlsx_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtlsxnc_vvssl : GCCBuiltin<"__builtin_ve_vl_vgtlsxnc_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtlsxnc_vvssvl : GCCBuiltin<"__builtin_ve_vl_vgtlsxnc_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtlsxnc_vvssml : GCCBuiltin<"__builtin_ve_vl_vgtlsxnc_vvssml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtlsxnc_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vgtlsxnc_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtlzx_vvssl : GCCBuiltin<"__builtin_ve_vl_vgtlzx_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtlzx_vvssvl : GCCBuiltin<"__builtin_ve_vl_vgtlzx_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtlzx_vvssml : GCCBuiltin<"__builtin_ve_vl_vgtlzx_vvssml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtlzx_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vgtlzx_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtlzxnc_vvssl : GCCBuiltin<"__builtin_ve_vl_vgtlzxnc_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtlzxnc_vvssvl : GCCBuiltin<"__builtin_ve_vl_vgtlzxnc_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtlzxnc_vvssml : GCCBuiltin<"__builtin_ve_vl_vgtlzxnc_vvssml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vgtlzxnc_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vgtlzxnc_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsc_vvssl : GCCBuiltin<"__builtin_ve_vl_vsc_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsc_vvssml : GCCBuiltin<"__builtin_ve_vl_vsc_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vscnc_vvssl : GCCBuiltin<"__builtin_ve_vl_vscnc_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vscnc_vvssml : GCCBuiltin<"__builtin_ve_vl_vscnc_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vscot_vvssl : GCCBuiltin<"__builtin_ve_vl_vscot_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vscot_vvssml : GCCBuiltin<"__builtin_ve_vl_vscot_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vscncot_vvssl : GCCBuiltin<"__builtin_ve_vl_vscncot_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vscncot_vvssml : GCCBuiltin<"__builtin_ve_vl_vscncot_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vscu_vvssl : GCCBuiltin<"__builtin_ve_vl_vscu_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vscu_vvssml : GCCBuiltin<"__builtin_ve_vl_vscu_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vscunc_vvssl : GCCBuiltin<"__builtin_ve_vl_vscunc_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vscunc_vvssml : GCCBuiltin<"__builtin_ve_vl_vscunc_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vscuot_vvssl : GCCBuiltin<"__builtin_ve_vl_vscuot_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vscuot_vvssml : GCCBuiltin<"__builtin_ve_vl_vscuot_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vscuncot_vvssl : GCCBuiltin<"__builtin_ve_vl_vscuncot_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vscuncot_vvssml : GCCBuiltin<"__builtin_ve_vl_vscuncot_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vscl_vvssl : GCCBuiltin<"__builtin_ve_vl_vscl_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vscl_vvssml : GCCBuiltin<"__builtin_ve_vl_vscl_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsclnc_vvssl : GCCBuiltin<"__builtin_ve_vl_vsclnc_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsclnc_vvssml : GCCBuiltin<"__builtin_ve_vl_vsclnc_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsclot_vvssl : GCCBuiltin<"__builtin_ve_vl_vsclot_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsclot_vvssml : GCCBuiltin<"__builtin_ve_vl_vsclot_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsclncot_vvssl : GCCBuiltin<"__builtin_ve_vl_vsclncot_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vsclncot_vvssml : GCCBuiltin<"__builtin_ve_vl_vsclncot_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;

diff --git a/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td b/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td
index e84edb07dae1..dbd173ef3690 100644
--- a/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td
+++ b/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td
@@ -1363,3 +1363,227 @@ def : Pat<(int_ve_vl_vror_vvl v256f64:$vy, i32:$vl), (VRORvl v256f64:$vy, i32:$v
 def : Pat<(int_ve_vl_vror_vvml v256f64:$vy, v256i1:$vm, i32:$vl), (VRORvml v256f64:$vy, v256i1:$vm, i32:$vl)>;
 def : Pat<(int_ve_vl_vrxor_vvl v256f64:$vy, i32:$vl), (VRXORvl v256f64:$vy, i32:$vl)>;
 def : Pat<(int_ve_vl_vrxor_vvml v256f64:$vy, v256i1:$vm, i32:$vl), (VRXORvml v256f64:$vy, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgt_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VGTvrrl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vgt_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$pt, i32:$vl), (VGTvrrl_v v256f64:$vy, i64:$sy, i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgt_vvssl v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VGTvrzl v256f64:$vy, i64:$sy, (LO7 $Z), i32:$vl)>;
+def : Pat<(int_ve_vl_vgt_vvssvl v256f64:$vy, i64:$sy, zero:$Z, v256f64:$pt, i32:$vl), (VGTvrzl_v v256f64:$vy, i64:$sy, (LO7 $Z), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgt_vvssl v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VGTvirl v256f64:$vy, (LO7 $I), i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vgt_vvssvl v256f64:$vy, simm7:$I, i64:$sz, v256f64:$pt, i32:$vl), (VGTvirl_v v256f64:$vy, (LO7 $I), i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgt_vvssl v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VGTvizl v256f64:$vy, (LO7 $I), (LO7 $Z), i32:$vl)>;
+def : Pat<(int_ve_vl_vgt_vvssvl v256f64:$vy, simm7:$I, zero:$Z, v256f64:$pt, i32:$vl), (VGTvizl_v v256f64:$vy, (LO7 $I), (LO7 $Z), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgt_vvssml v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VGTvrrml v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgt_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTvrrml_v v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgt_vvssml v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VGTvrzml v256f64:$vy, i64:$sy, (LO7 $Z), v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgt_vvssmvl v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTvrzml_v v256f64:$vy, i64:$sy, (LO7 $Z), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgt_vvssml v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VGTvirml v256f64:$vy, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgt_vvssmvl v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTvirml_v v256f64:$vy, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgt_vvssml v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VGTvizml v256f64:$vy, (LO7 $I), (LO7 $Z), v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgt_vvssmvl v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTvizml_v v256f64:$vy, (LO7 $I), (LO7 $Z), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtnc_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VGTNCvrrl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtnc_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$pt, i32:$vl), (VGTNCvrrl_v v256f64:$vy, i64:$sy, i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtnc_vvssl v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VGTNCvrzl v256f64:$vy, i64:$sy, (LO7 $Z), i32:$vl)>;
+def : Pat<(int_ve_vl_vgtnc_vvssvl v256f64:$vy, i64:$sy, zero:$Z, v256f64:$pt, i32:$vl), (VGTNCvrzl_v v256f64:$vy, i64:$sy, (LO7 $Z), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtnc_vvssl v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VGTNCvirl v256f64:$vy, (LO7 $I), i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtnc_vvssvl v256f64:$vy, simm7:$I, i64:$sz, v256f64:$pt, i32:$vl), (VGTNCvirl_v v256f64:$vy, (LO7 $I), i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtnc_vvssl v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VGTNCvizl v256f64:$vy, (LO7 $I), (LO7 $Z), i32:$vl)>;
+def : Pat<(int_ve_vl_vgtnc_vvssvl v256f64:$vy, simm7:$I, zero:$Z, v256f64:$pt, i32:$vl), (VGTNCvizl_v v256f64:$vy, (LO7 $I), (LO7 $Z), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtnc_vvssml v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VGTNCvrrml v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtnc_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTNCvrrml_v v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtnc_vvssml v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VGTNCvrzml v256f64:$vy, i64:$sy, (LO7 $Z), v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtnc_vvssmvl v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTNCvrzml_v v256f64:$vy, i64:$sy, (LO7 $Z), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtnc_vvssml v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VGTNCvirml v256f64:$vy, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtnc_vvssmvl v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTNCvirml_v v256f64:$vy, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtnc_vvssml v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VGTNCvizml v256f64:$vy, (LO7 $I), (LO7 $Z), v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtnc_vvssmvl v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTNCvizml_v v256f64:$vy, (LO7 $I), (LO7 $Z), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtu_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VGTUvrrl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtu_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$pt, i32:$vl), (VGTUvrrl_v v256f64:$vy, i64:$sy, i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtu_vvssl v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VGTUvrzl v256f64:$vy, i64:$sy, (LO7 $Z), i32:$vl)>;
+def : Pat<(int_ve_vl_vgtu_vvssvl v256f64:$vy, i64:$sy, zero:$Z, v256f64:$pt, i32:$vl), (VGTUvrzl_v v256f64:$vy, i64:$sy, (LO7 $Z), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtu_vvssl v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VGTUvirl v256f64:$vy, (LO7 $I), i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtu_vvssvl v256f64:$vy, simm7:$I, i64:$sz, v256f64:$pt, i32:$vl), (VGTUvirl_v v256f64:$vy, (LO7 $I), i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtu_vvssl v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VGTUvizl v256f64:$vy, (LO7 $I), (LO7 $Z), i32:$vl)>;
+def : Pat<(int_ve_vl_vgtu_vvssvl v256f64:$vy, simm7:$I, zero:$Z, v256f64:$pt, i32:$vl), (VGTUvizl_v v256f64:$vy, (LO7 $I), (LO7 $Z), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtu_vvssml v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VGTUvrrml v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtu_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTUvrrml_v v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtu_vvssml v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VGTUvrzml v256f64:$vy, i64:$sy, (LO7 $Z), v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtu_vvssmvl v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTUvrzml_v v256f64:$vy, i64:$sy, (LO7 $Z), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtu_vvssml v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VGTUvirml v256f64:$vy, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtu_vvssmvl v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTUvirml_v v256f64:$vy, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtu_vvssml v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VGTUvizml v256f64:$vy, (LO7 $I), (LO7 $Z), v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtu_vvssmvl v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTUvizml_v v256f64:$vy, (LO7 $I), (LO7 $Z), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtunc_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VGTUNCvrrl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtunc_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$pt, i32:$vl), (VGTUNCvrrl_v v256f64:$vy, i64:$sy, i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtunc_vvssl v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VGTUNCvrzl v256f64:$vy, i64:$sy, (LO7 $Z), i32:$vl)>;
+def : Pat<(int_ve_vl_vgtunc_vvssvl v256f64:$vy, i64:$sy, zero:$Z, v256f64:$pt, i32:$vl), (VGTUNCvrzl_v v256f64:$vy, i64:$sy, (LO7 $Z), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtunc_vvssl v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VGTUNCvirl v256f64:$vy, (LO7 $I), i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtunc_vvssvl v256f64:$vy, simm7:$I, i64:$sz, v256f64:$pt, i32:$vl), (VGTUNCvirl_v v256f64:$vy, (LO7 $I), i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtunc_vvssl v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VGTUNCvizl v256f64:$vy, (LO7 $I), (LO7 $Z), i32:$vl)>;
+def : Pat<(int_ve_vl_vgtunc_vvssvl v256f64:$vy, simm7:$I, zero:$Z, v256f64:$pt, i32:$vl), (VGTUNCvizl_v v256f64:$vy, (LO7 $I), (LO7 $Z), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtunc_vvssml v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VGTUNCvrrml v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtunc_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTUNCvrrml_v v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtunc_vvssml v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VGTUNCvrzml v256f64:$vy, i64:$sy, (LO7 $Z), v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtunc_vvssmvl v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTUNCvrzml_v v256f64:$vy, i64:$sy, (LO7 $Z), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtunc_vvssml v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VGTUNCvirml v256f64:$vy, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtunc_vvssmvl v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTUNCvirml_v v256f64:$vy, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtunc_vvssml v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VGTUNCvizml v256f64:$vy, (LO7 $I), (LO7 $Z), v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtunc_vvssmvl v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTUNCvizml_v v256f64:$vy, (LO7 $I), (LO7 $Z), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlsx_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VGTLSXvrrl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlsx_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$pt, i32:$vl), (VGTLSXvrrl_v v256f64:$vy, i64:$sy, i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlsx_vvssl v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VGTLSXvrzl v256f64:$vy, i64:$sy, (LO7 $Z), i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlsx_vvssvl v256f64:$vy, i64:$sy, zero:$Z, v256f64:$pt, i32:$vl), (VGTLSXvrzl_v v256f64:$vy, i64:$sy, (LO7 $Z), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlsx_vvssl v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VGTLSXvirl v256f64:$vy, (LO7 $I), i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlsx_vvssvl v256f64:$vy, simm7:$I, i64:$sz, v256f64:$pt, i32:$vl), (VGTLSXvirl_v v256f64:$vy, (LO7 $I), i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlsx_vvssl v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VGTLSXvizl v256f64:$vy, (LO7 $I), (LO7 $Z), i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlsx_vvssvl v256f64:$vy, simm7:$I, zero:$Z, v256f64:$pt, i32:$vl), (VGTLSXvizl_v v256f64:$vy, (LO7 $I), (LO7 $Z), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlsx_vvssml v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VGTLSXvrrml v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlsx_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTLSXvrrml_v v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlsx_vvssml v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VGTLSXvrzml v256f64:$vy, i64:$sy, (LO7 $Z), v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlsx_vvssmvl v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTLSXvrzml_v v256f64:$vy, i64:$sy, (LO7 $Z), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlsx_vvssml v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VGTLSXvirml v256f64:$vy, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlsx_vvssmvl v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTLSXvirml_v v256f64:$vy, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlsx_vvssml v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VGTLSXvizml v256f64:$vy, (LO7 $I), (LO7 $Z), v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlsx_vvssmvl v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTLSXvizml_v v256f64:$vy, (LO7 $I), (LO7 $Z), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlsxnc_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VGTLSXNCvrrl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlsxnc_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$pt, i32:$vl), (VGTLSXNCvrrl_v v256f64:$vy, i64:$sy, i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlsxnc_vvssl v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VGTLSXNCvrzl v256f64:$vy, i64:$sy, (LO7 $Z), i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlsxnc_vvssvl v256f64:$vy, i64:$sy, zero:$Z, v256f64:$pt, i32:$vl), (VGTLSXNCvrzl_v v256f64:$vy, i64:$sy, (LO7 $Z), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlsxnc_vvssl v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VGTLSXNCvirl v256f64:$vy, (LO7 $I), i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlsxnc_vvssvl v256f64:$vy, simm7:$I, i64:$sz, v256f64:$pt, i32:$vl), (VGTLSXNCvirl_v v256f64:$vy, (LO7 $I), i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlsxnc_vvssl v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VGTLSXNCvizl v256f64:$vy, (LO7 $I), (LO7 $Z), i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlsxnc_vvssvl v256f64:$vy, simm7:$I, zero:$Z, v256f64:$pt, i32:$vl), (VGTLSXNCvizl_v v256f64:$vy, (LO7 $I), (LO7 $Z), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlsxnc_vvssml v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VGTLSXNCvrrml v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlsxnc_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTLSXNCvrrml_v v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlsxnc_vvssml v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VGTLSXNCvrzml v256f64:$vy, i64:$sy, (LO7 $Z), v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlsxnc_vvssmvl v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTLSXNCvrzml_v v256f64:$vy, i64:$sy, (LO7 $Z), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlsxnc_vvssml v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VGTLSXNCvirml v256f64:$vy, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlsxnc_vvssmvl v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTLSXNCvirml_v v256f64:$vy, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlsxnc_vvssml v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VGTLSXNCvizml v256f64:$vy, (LO7 $I), (LO7 $Z), v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlsxnc_vvssmvl v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTLSXNCvizml_v v256f64:$vy, (LO7 $I), (LO7 $Z), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlzx_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VGTLZXvrrl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlzx_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$pt, i32:$vl), (VGTLZXvrrl_v v256f64:$vy, i64:$sy, i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlzx_vvssl v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VGTLZXvrzl v256f64:$vy, i64:$sy, (LO7 $Z), i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlzx_vvssvl v256f64:$vy, i64:$sy, zero:$Z, v256f64:$pt, i32:$vl), (VGTLZXvrzl_v v256f64:$vy, i64:$sy, (LO7 $Z), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlzx_vvssl v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VGTLZXvirl v256f64:$vy, (LO7 $I), i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlzx_vvssvl v256f64:$vy, simm7:$I, i64:$sz, v256f64:$pt, i32:$vl), (VGTLZXvirl_v v256f64:$vy, (LO7 $I), i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlzx_vvssl v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VGTLZXvizl v256f64:$vy, (LO7 $I), (LO7 $Z), i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlzx_vvssvl v256f64:$vy, simm7:$I, zero:$Z, v256f64:$pt, i32:$vl), (VGTLZXvizl_v v256f64:$vy, (LO7 $I), (LO7 $Z), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlzx_vvssml v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VGTLZXvrrml v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlzx_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTLZXvrrml_v v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlzx_vvssml v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VGTLZXvrzml v256f64:$vy, i64:$sy, (LO7 $Z), v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlzx_vvssmvl v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTLZXvrzml_v v256f64:$vy, i64:$sy, (LO7 $Z), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlzx_vvssml v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VGTLZXvirml v256f64:$vy, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlzx_vvssmvl v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTLZXvirml_v v256f64:$vy, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlzx_vvssml v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VGTLZXvizml v256f64:$vy, (LO7 $I), (LO7 $Z), v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlzx_vvssmvl v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTLZXvizml_v v256f64:$vy, (LO7 $I), (LO7 $Z), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlzxnc_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VGTLZXNCvrrl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlzxnc_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$pt, i32:$vl), (VGTLZXNCvrrl_v v256f64:$vy, i64:$sy, i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlzxnc_vvssl v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VGTLZXNCvrzl v256f64:$vy, i64:$sy, (LO7 $Z), i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlzxnc_vvssvl v256f64:$vy, i64:$sy, zero:$Z, v256f64:$pt, i32:$vl), (VGTLZXNCvrzl_v v256f64:$vy, i64:$sy, (LO7 $Z), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlzxnc_vvssl v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VGTLZXNCvirl v256f64:$vy, (LO7 $I), i64:$sz, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlzxnc_vvssvl v256f64:$vy, simm7:$I, i64:$sz, v256f64:$pt, i32:$vl), (VGTLZXNCvirl_v v256f64:$vy, (LO7 $I), i64:$sz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlzxnc_vvssl v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VGTLZXNCvizl v256f64:$vy, (LO7 $I), (LO7 $Z), i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlzxnc_vvssvl v256f64:$vy, simm7:$I, zero:$Z, v256f64:$pt, i32:$vl), (VGTLZXNCvizl_v v256f64:$vy, (LO7 $I), (LO7 $Z), i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlzxnc_vvssml v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VGTLZXNCvrrml v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlzxnc_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTLZXNCvrrml_v v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlzxnc_vvssml v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VGTLZXNCvrzml v256f64:$vy, i64:$sy, (LO7 $Z), v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlzxnc_vvssmvl v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTLZXNCvrzml_v v256f64:$vy, i64:$sy, (LO7 $Z), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlzxnc_vvssml v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VGTLZXNCvirml v256f64:$vy, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlzxnc_vvssmvl v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTLZXNCvirml_v v256f64:$vy, (LO7 $I), i64:$sz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vgtlzxnc_vvssml v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VGTLZXNCvizml v256f64:$vy, (LO7 $I), (LO7 $Z), v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vgtlzxnc_vvssmvl v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, v256f64:$pt, i32:$vl), (VGTLZXNCvizml_v v256f64:$vy, (LO7 $I), (LO7 $Z), v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vsc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VSCvrrvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vsc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VSCvrzvl v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vsc_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VSCvirvl v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vsc_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VSCvizvl v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vsc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VSCvrrvml v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vsc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VSCvrzvml v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vsc_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VSCvirvml v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vsc_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VSCvizvml v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscnc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VSCNCvrrvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscnc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VSCNCvrzvl v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscnc_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VSCNCvirvl v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscnc_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VSCNCvizvl v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscnc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VSCNCvrrvml v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscnc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VSCNCvrzvml v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscnc_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VSCNCvirvml v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscnc_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VSCNCvizvml v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VSCOTvrrvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VSCOTvrzvl v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscot_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VSCOTvirvl v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscot_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VSCOTvizvl v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VSCOTvrrvml v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VSCOTvrzvml v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscot_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VSCOTvirvml v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscot_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VSCOTvizvml v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscncot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VSCNCOTvrrvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscncot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VSCNCOTvrzvl v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscncot_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VSCNCOTvirvl v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscncot_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VSCNCOTvizvl v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VSCNCOTvrrvml v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VSCNCOTvrzvml v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscncot_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VSCNCOTvirvml v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscncot_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VSCNCOTvizvml v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscu_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VSCUvrrvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscu_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VSCUvrzvl v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscu_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VSCUvirvl v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscu_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VSCUvizvl v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscu_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VSCUvrrvml v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscu_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VSCUvrzvml v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscu_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VSCUvirvml v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscu_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VSCUvizvml v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscunc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VSCUNCvrrvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscunc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VSCUNCvrzvl v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscunc_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VSCUNCvirvl v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscunc_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VSCUNCvizvl v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscunc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VSCUNCvrrvml v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscunc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VSCUNCvrzvml v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscunc_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VSCUNCvirvml v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscunc_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VSCUNCvizvml v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscuot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VSCUOTvrrvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscuot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VSCUOTvrzvl v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscuot_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VSCUOTvirvl v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscuot_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VSCUOTvizvl v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscuot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VSCUOTvrrvml v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscuot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VSCUOTvrzvml v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscuot_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VSCUOTvirvml v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscuot_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VSCUOTvizvml v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscuncot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VSCUNCOTvrrvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscuncot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VSCUNCOTvrzvl v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscuncot_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VSCUNCOTvirvl v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscuncot_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VSCUNCOTvizvl v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscuncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VSCUNCOTvrrvml v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscuncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VSCUNCOTvrzvml v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscuncot_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VSCUNCOTvirvml v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscuncot_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VSCUNCOTvizvml v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscl_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VSCLvrrvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscl_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VSCLvrzvl v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscl_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VSCLvirvl v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscl_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VSCLvizvl v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vscl_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VSCLvrrvml v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscl_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VSCLvrzvml v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscl_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VSCLvirvml v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vscl_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VSCLvizvml v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclnc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VSCLNCvrrvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclnc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VSCLNCvrzvl v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclnc_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VSCLNCvirvl v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclnc_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VSCLNCvizvl v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclnc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VSCLNCvrrvml v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclnc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VSCLNCvrzvml v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclnc_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VSCLNCvirvml v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclnc_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VSCLNCvizvml v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VSCLOTvrrvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VSCLOTvrzvl v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclot_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VSCLOTvirvl v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclot_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VSCLOTvizvl v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VSCLOTvrrvml v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VSCLOTvrzvml v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclot_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VSCLOTvirvml v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclot_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VSCLOTvizvml v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclncot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (VSCLNCOTvrrvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclncot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, i32:$vl), (VSCLNCOTvrzvl v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclncot_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, i32:$vl), (VSCLNCOTvirvl v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclncot_vvssl v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, i32:$vl), (VSCLNCOTvizvl v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v256i1:$vm, i32:$vl), (VSCLNCOTvrrvml v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VSCLNCOTvrzvml v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclncot_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VSCLNCOTvirvml v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_vsclncot_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VSCLNCOTvizvml v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vgt.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vgt.ll
new file mode 100644
index 000000000000..cadf6a85d6cc
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vgt.ll
@@ -0,0 +1,1936 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test vector gather intrinsic instructions
+;;;
+;;; Note:
+;;;   We test VGT*vrrl, VGT*vrrl_v, VGT*vrzl, VGT*vrzl_v, VGT*virl, VGT*virl_v,
+;;;   VGT*vizl, VGT*vizl_v, VGT*vrrml, VGT*vrrml_v, VGT*vrzml, VGT*vrzml_v,
+;;;   VGT*virml, VGT*virml_v, VGT*vizml, and VGT*vizml_v instructions.
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssl(<256 x double> %0, i64 %1, i64 %2) {
+; CHECK-LABEL: vgt_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgt %v0, %v0, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssl(<256 x double> %0, i64 %1, i64 %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgt.vvssl(<256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3) {
+; CHECK-LABEL: vgt_vvssvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgt %v1, %v0, %s0, %s1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgt.vvssvl(<256 x double>, i64, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssl_imm_1(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgt_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt %v0, %v0, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssl(<256 x double> %0, i64 %1, i64 0, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssvl_imm_1(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vgt_vvssvl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt %v1, %v0, %s0, 0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssvl(<256 x double> %0, i64 %1, i64 0, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssl_imm_2(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgt_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt %v0, %v0, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssl(<256 x double> %0, i64 8, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssvl_imm_2(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vgt_vvssvl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt %v1, %v0, 8, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssvl(<256 x double> %0, i64 8, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssl_imm_3(<256 x double> %0) {
+; CHECK-LABEL: vgt_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgt %v0, %v0, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssl(<256 x double> %0, i64 8, i64 0, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssvl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vgt_vvssvl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgt %v1, %v0, 8, 0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssvl(<256 x double> %0, i64 8, i64 0, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssml(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vgt_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgt %v0, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssml(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, i32 256)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgt.vvssml(<256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4) {
+; CHECK-LABEL: vgt_vvssmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgt %v1, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %6 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4, i32 128)
+  ret <256 x double> %6
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgt.vvssmvl(<256 x double>, i64, i64, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssml_imm_1(<256 x double> %0, i64 %1, <256 x i1> %2) {
+; CHECK-LABEL: vgt_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt %v0, %v0, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssml(<256 x double> %0, i64 %1, i64 0, <256 x i1> %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssmvl_imm_1(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vgt_vvssmvl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt %v1, %v0, %s0, 0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssmvl(<256 x double> %0, i64 %1, i64 0, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssml_imm_2(<256 x double> %0, i64 %1, <256 x i1> %2) {
+; CHECK-LABEL: vgt_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt %v0, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssml(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssmvl_imm_2(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vgt_vvssmvl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt %v1, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssmvl(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssml_imm_3(<256 x double> %0, <256 x i1> %1) {
+; CHECK-LABEL: vgt_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgt %v0, %v0, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssml(<256 x double> %0, i64 8, i64 0, <256 x i1> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssmvl_imm_3(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vgt_vvssmvl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgt %v1, %v0, 8, 0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssmvl(<256 x double> %0, i64 8, i64 0, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgt_vvssl_no_imm_1(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgt_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt %v0, %v0, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgt.vvssl(<256 x double> %0, i64 %1, i64 8, i32 256)
+  ret <256 x double> %3
+}
+
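+; vgt.nc tests: the .nc suffix presumably selects the non-caching form of the
+; gather; operand and immediate-folding coverage mirrors plain vgt above.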
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssl(<256 x double> %0, i64 %1, i64 %2) {
+; CHECK-LABEL: vgtnc_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgt.nc %v0, %v0, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssl(<256 x double> %0, i64 %1, i64 %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtnc.vvssl(<256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3) {
+; CHECK-LABEL: vgtnc_vvssvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgt.nc %v1, %v0, %s0, %s1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtnc.vvssvl(<256 x double>, i64, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssl_imm_1(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtnc_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt.nc %v0, %v0, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssl(<256 x double> %0, i64 %1, i64 0, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssvl_imm_1(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vgtnc_vvssvl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt.nc %v1, %v0, %s0, 0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssvl(<256 x double> %0, i64 %1, i64 0, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssl_imm_2(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtnc_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt.nc %v0, %v0, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssl(<256 x double> %0, i64 8, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssvl_imm_2(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vgtnc_vvssvl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt.nc %v1, %v0, 8, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssvl(<256 x double> %0, i64 8, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssl_imm_3(<256 x double> %0) {
+; CHECK-LABEL: vgtnc_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgt.nc %v0, %v0, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssl(<256 x double> %0, i64 8, i64 0, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssvl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vgtnc_vvssvl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgt.nc %v1, %v0, 8, 0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssvl(<256 x double> %0, i64 8, i64 0, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssml(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vgtnc_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgt.nc %v0, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssml(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, i32 256)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtnc.vvssml(<256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4) {
+; CHECK-LABEL: vgtnc_vvssmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgt.nc %v1, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %6 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4, i32 128)
+  ret <256 x double> %6
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtnc.vvssmvl(<256 x double>, i64, i64, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssml_imm_1(<256 x double> %0, i64 %1, <256 x i1> %2) {
+; CHECK-LABEL: vgtnc_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt.nc %v0, %v0, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssml(<256 x double> %0, i64 %1, i64 0, <256 x i1> %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssmvl_imm_1(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vgtnc_vvssmvl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt.nc %v1, %v0, %s0, 0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssmvl(<256 x double> %0, i64 %1, i64 0, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssml_imm_2(<256 x double> %0, i64 %1, <256 x i1> %2) {
+; CHECK-LABEL: vgtnc_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt.nc %v0, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssml(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssmvl_imm_2(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vgtnc_vvssmvl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt.nc %v1, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssmvl(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssml_imm_3(<256 x double> %0, <256 x i1> %1) {
+; CHECK-LABEL: vgtnc_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgt.nc %v0, %v0, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssml(<256 x double> %0, i64 8, i64 0, <256 x i1> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssmvl_imm_3(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vgtnc_vvssmvl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgt.nc %v1, %v0, 8, 0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssmvl(<256 x double> %0, i64 8, i64 0, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtnc_vvssl_no_imm_1(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtnc_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgt.nc %v0, %v0, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtnc.vvssl(<256 x double> %0, i64 %1, i64 8, i32 256)
+  ret <256 x double> %3
+}
+
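+; vgtu tests: vgtu gathers 32-bit data into the upper half of each 64-bit
+; element (where VE holds 32-bit floats); same operand patterns as vgt.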
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssl(<256 x double> %0, i64 %1, i64 %2) {
+; CHECK-LABEL: vgtu_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtu %v0, %v0, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssl(<256 x double> %0, i64 %1, i64 %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtu.vvssl(<256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3) {
+; CHECK-LABEL: vgtu_vvssvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtu %v1, %v0, %s0, %s1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtu.vvssvl(<256 x double>, i64, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssl_imm_1(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtu_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu %v0, %v0, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssl(<256 x double> %0, i64 %1, i64 0, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssvl_imm_1(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vgtu_vvssvl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu %v1, %v0, %s0, 0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssvl(<256 x double> %0, i64 %1, i64 0, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssl_imm_2(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtu_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu %v0, %v0, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssl(<256 x double> %0, i64 8, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssvl_imm_2(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vgtu_vvssvl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu %v1, %v0, 8, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssvl(<256 x double> %0, i64 8, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssl_imm_3(<256 x double> %0) {
+; CHECK-LABEL: vgtu_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtu %v0, %v0, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssl(<256 x double> %0, i64 8, i64 0, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssvl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vgtu_vvssvl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtu %v1, %v0, 8, 0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssvl(<256 x double> %0, i64 8, i64 0, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssml(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vgtu_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtu %v0, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssml(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, i32 256)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtu.vvssml(<256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4) {
+; CHECK-LABEL: vgtu_vvssmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtu %v1, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %6 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4, i32 128)
+  ret <256 x double> %6
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtu.vvssmvl(<256 x double>, i64, i64, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssml_imm_1(<256 x double> %0, i64 %1, <256 x i1> %2) {
+; CHECK-LABEL: vgtu_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu %v0, %v0, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssml(<256 x double> %0, i64 %1, i64 0, <256 x i1> %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssmvl_imm_1(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vgtu_vvssmvl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu %v1, %v0, %s0, 0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssmvl(<256 x double> %0, i64 %1, i64 0, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssml_imm_2(<256 x double> %0, i64 %1, <256 x i1> %2) {
+; CHECK-LABEL: vgtu_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu %v0, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssml(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssmvl_imm_2(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vgtu_vvssmvl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu %v1, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssmvl(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssml_imm_3(<256 x double> %0, <256 x i1> %1) {
+; CHECK-LABEL: vgtu_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtu %v0, %v0, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssml(<256 x double> %0, i64 8, i64 0, <256 x i1> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssmvl_imm_3(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vgtu_vvssmvl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtu %v1, %v0, 8, 0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssmvl(<256 x double> %0, i64 8, i64 0, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtu_vvssl_no_imm_1(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtu_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu %v0, %v0, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtu.vvssl(<256 x double> %0, i64 %1, i64 8, i32 256)
+  ret <256 x double> %3
+}
+
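+; vgtu.nc tests: the non-caching counterpart of vgtu, with identical coverage.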
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssl(<256 x double> %0, i64 %1, i64 %2) {
+; CHECK-LABEL: vgtunc_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtu.nc %v0, %v0, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssl(<256 x double> %0, i64 %1, i64 %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtunc.vvssl(<256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3) {
+; CHECK-LABEL: vgtunc_vvssvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtu.nc %v1, %v0, %s0, %s1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtunc.vvssvl(<256 x double>, i64, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssl_imm_1(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtunc_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu.nc %v0, %v0, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssl(<256 x double> %0, i64 %1, i64 0, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssvl_imm_1(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vgtunc_vvssvl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu.nc %v1, %v0, %s0, 0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssvl(<256 x double> %0, i64 %1, i64 0, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssl_imm_2(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtunc_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu.nc %v0, %v0, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssl(<256 x double> %0, i64 8, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssvl_imm_2(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vgtunc_vvssvl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu.nc %v1, %v0, 8, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssvl(<256 x double> %0, i64 8, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssl_imm_3(<256 x double> %0) {
+; CHECK-LABEL: vgtunc_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtu.nc %v0, %v0, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssl(<256 x double> %0, i64 8, i64 0, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssvl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vgtunc_vvssvl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtu.nc %v1, %v0, 8, 0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssvl(<256 x double> %0, i64 8, i64 0, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssml(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vgtunc_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtu.nc %v0, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssml(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, i32 256)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtunc.vvssml(<256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4) {
+; CHECK-LABEL: vgtunc_vvssmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtu.nc %v1, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %6 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4, i32 128)
+  ret <256 x double> %6
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtunc.vvssmvl(<256 x double>, i64, i64, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssml_imm_1(<256 x double> %0, i64 %1, <256 x i1> %2) {
+; CHECK-LABEL: vgtunc_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu.nc %v0, %v0, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssml(<256 x double> %0, i64 %1, i64 0, <256 x i1> %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssmvl_imm_1(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vgtunc_vvssmvl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu.nc %v1, %v0, %s0, 0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssmvl(<256 x double> %0, i64 %1, i64 0, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssml_imm_2(<256 x double> %0, i64 %1, <256 x i1> %2) {
+; CHECK-LABEL: vgtunc_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu.nc %v0, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssml(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssmvl_imm_2(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vgtunc_vvssmvl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu.nc %v1, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssmvl(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssml_imm_3(<256 x double> %0, <256 x i1> %1) {
+; CHECK-LABEL: vgtunc_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtu.nc %v0, %v0, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssml(<256 x double> %0, i64 8, i64 0, <256 x i1> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssmvl_imm_3(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vgtunc_vvssmvl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtu.nc %v1, %v0, 8, 0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssmvl(<256 x double> %0, i64 8, i64 0, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtunc_vvssl_no_imm_1(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtunc_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtu.nc %v0, %v0, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtunc.vvssl(<256 x double> %0, i64 %1, i64 8, i32 256)
+  ret <256 x double> %3
+}
+
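+; vgtl.sx tests: vgtl gathers 32-bit data into the lower half of each element;
+; the .sx form sign-extends the loaded word, while .zx (tested further below)
+; zero-extends it.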
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssl(<256 x double> %0, i64 %1, i64 %2) {
+; CHECK-LABEL: vgtlsx_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtl.sx %v0, %v0, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssl(<256 x double> %0, i64 %1, i64 %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtlsx.vvssl(<256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3) {
+; CHECK-LABEL: vgtlsx_vvssvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtl.sx %v1, %v0, %s0, %s1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtlsx.vvssvl(<256 x double>, i64, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssl_imm_1(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtlsx_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx %v0, %v0, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssl(<256 x double> %0, i64 %1, i64 0, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssvl_imm_1(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vgtlsx_vvssvl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx %v1, %v0, %s0, 0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssvl(<256 x double> %0, i64 %1, i64 0, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssl_imm_2(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtlsx_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx %v0, %v0, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssl(<256 x double> %0, i64 8, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssvl_imm_2(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vgtlsx_vvssvl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx %v1, %v0, 8, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssvl(<256 x double> %0, i64 8, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssl_imm_3(<256 x double> %0) {
+; CHECK-LABEL: vgtlsx_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtl.sx %v0, %v0, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssl(<256 x double> %0, i64 8, i64 0, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssvl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vgtlsx_vvssvl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtl.sx %v1, %v0, 8, 0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssvl(<256 x double> %0, i64 8, i64 0, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssml(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vgtlsx_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtl.sx %v0, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssml(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, i32 256)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtlsx.vvssml(<256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4) {
+; CHECK-LABEL: vgtlsx_vvssmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtl.sx %v1, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %6 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4, i32 128)
+  ret <256 x double> %6
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtlsx.vvssmvl(<256 x double>, i64, i64, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssml_imm_1(<256 x double> %0, i64 %1, <256 x i1> %2) {
+; CHECK-LABEL: vgtlsx_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx %v0, %v0, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssml(<256 x double> %0, i64 %1, i64 0, <256 x i1> %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssmvl_imm_1(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vgtlsx_vvssmvl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx %v1, %v0, %s0, 0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssmvl(<256 x double> %0, i64 %1, i64 0, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssml_imm_2(<256 x double> %0, i64 %1, <256 x i1> %2) {
+; CHECK-LABEL: vgtlsx_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx %v0, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssml(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssmvl_imm_2(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vgtlsx_vvssmvl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx %v1, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssmvl(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssml_imm_3(<256 x double> %0, <256 x i1> %1) {
+; CHECK-LABEL: vgtlsx_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtl.sx %v0, %v0, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssml(<256 x double> %0, i64 8, i64 0, <256 x i1> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssmvl_imm_3(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vgtlsx_vvssmvl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtl.sx %v1, %v0, 8, 0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssmvl(<256 x double> %0, i64 8, i64 0, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsx_vvssl_no_imm_1(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtlsx_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx %v0, %v0, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlsx.vvssl(<256 x double> %0, i64 %1, i64 8, i32 256)
+  ret <256 x double> %3
+}
+
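+; vgtl.sx.nc tests: the sign-extending 32-bit gather in its non-caching form.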
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssl(<256 x double> %0, i64 %1, i64 %2) {
+; CHECK-LABEL: vgtlsxnc_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtl.sx.nc %v0, %v0, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssl(<256 x double> %0, i64 %1, i64 %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtlsxnc.vvssl(<256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3) {
+; CHECK-LABEL: vgtlsxnc_vvssvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtl.sx.nc %v1, %v0, %s0, %s1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtlsxnc.vvssvl(<256 x double>, i64, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssl_imm_1(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtlsxnc_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx.nc %v0, %v0, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssl(<256 x double> %0, i64 %1, i64 0, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssvl_imm_1(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vgtlsxnc_vvssvl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx.nc %v1, %v0, %s0, 0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssvl(<256 x double> %0, i64 %1, i64 0, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssl_imm_2(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtlsxnc_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx.nc %v0, %v0, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssl(<256 x double> %0, i64 8, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssvl_imm_2(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vgtlsxnc_vvssvl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx.nc %v1, %v0, 8, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssvl(<256 x double> %0, i64 8, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssl_imm_3(<256 x double> %0) {
+; CHECK-LABEL: vgtlsxnc_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtl.sx.nc %v0, %v0, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssl(<256 x double> %0, i64 8, i64 0, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssvl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vgtlsxnc_vvssvl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtl.sx.nc %v1, %v0, 8, 0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssvl(<256 x double> %0, i64 8, i64 0, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssml(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vgtlsxnc_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtl.sx.nc %v0, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssml(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, i32 256)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtlsxnc.vvssml(<256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4) {
+; CHECK-LABEL: vgtlsxnc_vvssmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtl.sx.nc %v1, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %6 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4, i32 128)
+  ret <256 x double> %6
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtlsxnc.vvssmvl(<256 x double>, i64, i64, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssml_imm_1(<256 x double> %0, i64 %1, <256 x i1> %2) {
+; CHECK-LABEL: vgtlsxnc_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx.nc %v0, %v0, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssml(<256 x double> %0, i64 %1, i64 0, <256 x i1> %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssmvl_imm_1(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vgtlsxnc_vvssmvl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx.nc %v1, %v0, %s0, 0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssmvl(<256 x double> %0, i64 %1, i64 0, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssml_imm_2(<256 x double> %0, i64 %1, <256 x i1> %2) {
+; CHECK-LABEL: vgtlsxnc_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx.nc %v0, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssml(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssmvl_imm_2(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vgtlsxnc_vvssmvl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx.nc %v1, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssmvl(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssml_imm_3(<256 x double> %0, <256 x i1> %1) {
+; CHECK-LABEL: vgtlsxnc_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtl.sx.nc %v0, %v0, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssml(<256 x double> %0, i64 8, i64 0, <256 x i1> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssmvl_imm_3(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vgtlsxnc_vvssmvl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtl.sx.nc %v1, %v0, 8, 0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssmvl(<256 x double> %0, i64 8, i64 0, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlsxnc_vvssl_no_imm_1(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtlsxnc_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.sx.nc %v0, %v0, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlsxnc.vvssl(<256 x double> %0, i64 %1, i64 8, i32 256)
+  ret <256 x double> %3
+}
+
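+; vgtl.zx tests: the zero-extending 32-bit gather; same operand and
+; immediate-folding coverage as the variants above.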
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssl(<256 x double> %0, i64 %1, i64 %2) {
+; CHECK-LABEL: vgtlzx_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtl.zx %v0, %v0, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssl(<256 x double> %0, i64 %1, i64 %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtlzx.vvssl(<256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3) {
+; CHECK-LABEL: vgtlzx_vvssvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtl.zx %v1, %v0, %s0, %s1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtlzx.vvssvl(<256 x double>, i64, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssl_imm_1(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtlzx_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx %v0, %v0, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssl(<256 x double> %0, i64 %1, i64 0, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssvl_imm_1(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vgtlzx_vvssvl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx %v1, %v0, %s0, 0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssvl(<256 x double> %0, i64 %1, i64 0, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssl_imm_2(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtlzx_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx %v0, %v0, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssl(<256 x double> %0, i64 8, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssvl_imm_2(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vgtlzx_vvssvl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx %v1, %v0, 8, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssvl(<256 x double> %0, i64 8, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssl_imm_3(<256 x double> %0) {
+; CHECK-LABEL: vgtlzx_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtl.zx %v0, %v0, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssl(<256 x double> %0, i64 8, i64 0, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssvl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vgtlzx_vvssvl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtl.zx %v1, %v0, 8, 0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssvl(<256 x double> %0, i64 8, i64 0, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssml(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vgtlzx_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtl.zx %v0, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssml(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, i32 256)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtlzx.vvssml(<256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4) {
+; CHECK-LABEL: vgtlzx_vvssmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtl.zx %v1, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %6 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4, i32 128)
+  ret <256 x double> %6
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtlzx.vvssmvl(<256 x double>, i64, i64, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssml_imm_1(<256 x double> %0, i64 %1, <256 x i1> %2) {
+; CHECK-LABEL: vgtlzx_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx %v0, %v0, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssml(<256 x double> %0, i64 %1, i64 0, <256 x i1> %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssmvl_imm_1(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vgtlzx_vvssmvl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx %v1, %v0, %s0, 0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssmvl(<256 x double> %0, i64 %1, i64 0, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssml_imm_2(<256 x double> %0, i64 %1, <256 x i1> %2) {
+; CHECK-LABEL: vgtlzx_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx %v0, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssml(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssmvl_imm_2(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vgtlzx_vvssmvl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx %v1, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssmvl(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssml_imm_3(<256 x double> %0, <256 x i1> %1) {
+; CHECK-LABEL: vgtlzx_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtl.zx %v0, %v0, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssml(<256 x double> %0, i64 8, i64 0, <256 x i1> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssmvl_imm_3(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vgtlzx_vvssmvl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtl.zx %v1, %v0, 8, 0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssmvl(<256 x double> %0, i64 8, i64 0, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzx_vvssl_no_imm_1(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtlzx_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx %v0, %v0, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlzx.vvssl(<256 x double> %0, i64 %1, i64 8, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssl(<256 x double> %0, i64 %1, i64 %2) {
+; CHECK-LABEL: vgtlzxnc_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtl.zx.nc %v0, %v0, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssl(<256 x double> %0, i64 %1, i64 %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtlzxnc.vvssl(<256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3) {
+; CHECK-LABEL: vgtlzxnc_vvssvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtl.zx.nc %v1, %v0, %s0, %s1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtlzxnc.vvssvl(<256 x double>, i64, i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssl_imm_1(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtlzxnc_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx.nc %v0, %v0, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssl(<256 x double> %0, i64 %1, i64 0, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssvl_imm_1(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vgtlzxnc_vvssvl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx.nc %v1, %v0, %s0, 0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssvl(<256 x double> %0, i64 %1, i64 0, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssl_imm_2(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtlzxnc_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx.nc %v0, %v0, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssl(<256 x double> %0, i64 8, i64 %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssvl_imm_2(<256 x double> %0, i64 %1, <256 x double> %2) {
+; CHECK-LABEL: vgtlzxnc_vvssvl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx.nc %v1, %v0, 8, %s0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssvl(<256 x double> %0, i64 8, i64 %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssl_imm_3(<256 x double> %0) {
+; CHECK-LABEL: vgtlzxnc_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtl.zx.nc %v0, %v0, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssl(<256 x double> %0, i64 8, i64 0, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssvl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vgtlzxnc_vvssvl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtl.zx.nc %v1, %v0, 8, 0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssvl(<256 x double> %0, i64 8, i64 0, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssml(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vgtlzxnc_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtl.zx.nc %v0, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssml(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, i32 256)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtlzxnc.vvssml(<256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4) {
+; CHECK-LABEL: vgtlzxnc_vvssmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 128
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vgtl.zx.nc %v1, %v0, %s0, %s1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %6 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4, i32 128)
+  ret <256 x double> %6
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vgtlzxnc.vvssmvl(<256 x double>, i64, i64, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssml_imm_1(<256 x double> %0, i64 %1, <256 x i1> %2) {
+; CHECK-LABEL: vgtlzxnc_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx.nc %v0, %v0, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssml(<256 x double> %0, i64 %1, i64 0, <256 x i1> %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssmvl_imm_1(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vgtlzxnc_vvssmvl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx.nc %v1, %v0, %s0, 0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssmvl(<256 x double> %0, i64 %1, i64 0, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssml_imm_2(<256 x double> %0, i64 %1, <256 x i1> %2) {
+; CHECK-LABEL: vgtlzxnc_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx.nc %v0, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssml(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, i32 256)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssmvl_imm_2(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vgtlzxnc_vvssmvl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx.nc %v1, %v0, 8, %s0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssmvl(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssml_imm_3(<256 x double> %0, <256 x i1> %1) {
+; CHECK-LABEL: vgtlzxnc_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtl.zx.nc %v0, %v0, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssml(<256 x double> %0, i64 8, i64 0, <256 x i1> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssmvl_imm_3(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vgtlzxnc_vvssmvl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vgtl.zx.nc %v1, %v0, 8, 0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssmvl(<256 x double> %0, i64 8, i64 0, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readonly
+define fastcc <256 x double> @vgtlzxnc_vvssl_no_imm_1(<256 x double> %0, i64 %1) {
+; CHECK-LABEL: vgtlzxnc_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vgtl.zx.nc %v0, %v0, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vgtlzxnc.vvssl(<256 x double> %0, i64 %1, i64 8, i32 256)
+  ret <256 x double> %3
+}

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vsc.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vsc.ll
new file mode 100644
index 000000000000..8e02bb241df3
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vsc.ll
@@ -0,0 +1,1387 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test vector scatter intrinsic instructions
+;;;
+;;; Note:
+;;;   We test VSC*vrrvl, VSC*vrzvl, VSC*virvl, VSC*vizvl, VSC*vrrvml,
+;;;   VSC*vrzvml, VSC*virvml, and VSC*vizvml instructions.
+
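+;;;   Operand convention (as exercised below): the first vector operand
+;;;   holds the data to store, the second the per-element addresses, the
+;;;   two i64 scalars are the sy/sz address components (added to each
+;;;   address, per our reading of the VE ISA; the *_imm_* tests cover the
+;;;   encodable-immediate forms), the optional <256 x i1> operand is the
+;;;   element mask, and the trailing i32 is the active vector length,
+;;;   which codegen materializes into VL via lea/lvl.
+;;;
+;;;   A minimal sketch of a full-length unmasked scatter (illustrative
+;;;   only, with hypothetical value names; not one of the checked tests):
+;;;
+;;;     ; store %val(i) to address %addr(i) + %sy + %sz for i = 0..255
+;;;     call void @llvm.ve.vl.vsc.vvssl(<256 x double> %val,
+;;;                                     <256 x double> %addr,
+;;;                                     i64 %sy, i64 %sz, i32 256)
+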
+; Function Attrs: nounwind writeonly
+define fastcc void @vsc_vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3) {
+; CHECK-LABEL: vsc_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vsc %v0, %v1, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsc.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vsc.vvssl(<256 x double>, <256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsc_vvssl_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vsc_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc %v0, %v1, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsc.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsc_vvssl_imm_2(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vsc_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc %v0, %v1, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsc.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsc_vvssl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vsc_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsc %v0, %v1, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsc.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsc_vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4) {
+; CHECK-LABEL: vsc_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vsc %v0, %v1, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsc.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vsc.vvssml(<256 x double>, <256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsc_vvssml_imm_1(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vsc_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc %v0, %v1, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsc.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsc_vvssml_imm_2(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vsc_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc %v0, %v1, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsc.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsc_vvssml_imm_3(<256 x double> %0, <256 x double> %1, <256 x i1> %2) {
+; CHECK-LABEL: vsc_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsc %v0, %v1, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsc.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 0, <256 x i1> %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsc_vvssl_no_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vsc_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc %v0, %v1, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsc.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 8, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscnc_vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3) {
+; CHECK-LABEL: vscnc_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vsc.nc %v0, %v1, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscnc.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vscnc.vvssl(<256 x double>, <256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscnc_vvssl_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscnc_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc.nc %v0, %v1, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscnc.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscnc_vvssl_imm_2(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscnc_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc.nc %v0, %v1, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscnc.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscnc_vvssl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vscnc_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsc.nc %v0, %v1, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscnc.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscnc_vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4) {
+; CHECK-LABEL: vscnc_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vsc.nc %v0, %v1, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscnc.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vscnc.vvssml(<256 x double>, <256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscnc_vvssml_imm_1(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vscnc_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc.nc %v0, %v1, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscnc.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscnc_vvssml_imm_2(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vscnc_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc.nc %v0, %v1, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscnc.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscnc_vvssml_imm_3(<256 x double> %0, <256 x double> %1, <256 x i1> %2) {
+; CHECK-LABEL: vscnc_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsc.nc %v0, %v1, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscnc.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 0, <256 x i1> %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscnc_vvssl_no_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscnc_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc.nc %v0, %v1, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscnc.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 8, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscot_vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3) {
+; CHECK-LABEL: vscot_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vsc.ot %v0, %v1, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vscot.vvssl(<256 x double>, <256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscot_vvssl_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscot_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc.ot %v0, %v1, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscot_vvssl_imm_2(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscot_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc.ot %v0, %v1, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscot.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscot_vvssl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vscot_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsc.ot %v0, %v1, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscot.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscot_vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4) {
+; CHECK-LABEL: vscot_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vsc.ot %v0, %v1, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscot.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vscot.vvssml(<256 x double>, <256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscot_vvssml_imm_1(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vscot_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc.ot %v0, %v1, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscot.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscot_vvssml_imm_2(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vscot_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc.ot %v0, %v1, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscot.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscot_vvssml_imm_3(<256 x double> %0, <256 x double> %1, <256 x i1> %2) {
+; CHECK-LABEL: vscot_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsc.ot %v0, %v1, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscot.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 0, <256 x i1> %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscot_vvssl_no_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscot_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc.ot %v0, %v1, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 8, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscncot_vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3) {
+; CHECK-LABEL: vscncot_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vsc.nc.ot %v0, %v1, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscncot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vscncot.vvssl(<256 x double>, <256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscncot_vvssl_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscncot_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc.nc.ot %v0, %v1, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscncot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscncot_vvssl_imm_2(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscncot_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc.nc.ot %v0, %v1, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscncot.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscncot_vvssl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vscncot_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsc.nc.ot %v0, %v1, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscncot.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscncot_vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4) {
+; CHECK-LABEL: vscncot_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vsc.nc.ot %v0, %v1, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscncot.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vscncot.vvssml(<256 x double>, <256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscncot_vvssml_imm_1(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vscncot_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc.nc.ot %v0, %v1, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscncot.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscncot_vvssml_imm_2(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vscncot_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc.nc.ot %v0, %v1, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscncot.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscncot_vvssml_imm_3(<256 x double> %0, <256 x double> %1, <256 x i1> %2) {
+; CHECK-LABEL: vscncot_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vsc.nc.ot %v0, %v1, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscncot.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 0, <256 x i1> %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscncot_vvssl_no_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscncot_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vsc.nc.ot %v0, %v1, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscncot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 8, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscu_vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3) {
+; CHECK-LABEL: vscu_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vscu %v0, %v1, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscu.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vscu.vvssl(<256 x double>, <256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscu_vvssl_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscu_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu %v0, %v1, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscu.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscu_vvssl_imm_2(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscu_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu %v0, %v1, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscu.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscu_vvssl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vscu_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vscu %v0, %v1, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscu.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscu_vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4) {
+; CHECK-LABEL: vscu_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vscu %v0, %v1, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscu.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vscu.vvssml(<256 x double>, <256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscu_vvssml_imm_1(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vscu_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu %v0, %v1, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscu.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscu_vvssml_imm_2(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vscu_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu %v0, %v1, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscu.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscu_vvssml_imm_3(<256 x double> %0, <256 x double> %1, <256 x i1> %2) {
+; CHECK-LABEL: vscu_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vscu %v0, %v1, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscu.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 0, <256 x i1> %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscu_vvssl_no_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscu_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu %v0, %v1, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscu.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 8, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscunc_vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3) {
+; CHECK-LABEL: vscunc_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vscu.nc %v0, %v1, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscunc.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vscunc.vvssl(<256 x double>, <256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscunc_vvssl_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscunc_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu.nc %v0, %v1, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscunc.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscunc_vvssl_imm_2(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscunc_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu.nc %v0, %v1, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscunc.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscunc_vvssl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vscunc_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vscu.nc %v0, %v1, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscunc.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscunc_vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4) {
+; CHECK-LABEL: vscunc_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vscu.nc %v0, %v1, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscunc.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vscunc.vvssml(<256 x double>, <256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscunc_vvssml_imm_1(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vscunc_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu.nc %v0, %v1, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscunc.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscunc_vvssml_imm_2(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vscunc_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu.nc %v0, %v1, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscunc.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscunc_vvssml_imm_3(<256 x double> %0, <256 x double> %1, <256 x i1> %2) {
+; CHECK-LABEL: vscunc_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vscu.nc %v0, %v1, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscunc.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 0, <256 x i1> %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscunc_vvssl_no_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscunc_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu.nc %v0, %v1, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscunc.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 8, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuot_vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3) {
+; CHECK-LABEL: vscuot_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vscu.ot %v0, %v1, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vscuot.vvssl(<256 x double>, <256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuot_vvssl_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscuot_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu.ot %v0, %v1, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuot_vvssl_imm_2(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscuot_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu.ot %v0, %v1, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuot.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuot_vvssl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vscuot_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vscu.ot %v0, %v1, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuot.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuot_vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4) {
+; CHECK-LABEL: vscuot_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vscu.ot %v0, %v1, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuot.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vscuot.vvssml(<256 x double>, <256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuot_vvssml_imm_1(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vscuot_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu.ot %v0, %v1, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuot.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuot_vvssml_imm_2(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vscuot_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu.ot %v0, %v1, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuot.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuot_vvssml_imm_3(<256 x double> %0, <256 x double> %1, <256 x i1> %2) {
+; CHECK-LABEL: vscuot_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vscu.ot %v0, %v1, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuot.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 0, <256 x i1> %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuot_vvssl_no_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscuot_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu.ot %v0, %v1, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 8, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuncot_vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3) {
+; CHECK-LABEL: vscuncot_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vscu.nc.ot %v0, %v1, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuncot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vscuncot.vvssl(<256 x double>, <256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuncot_vvssl_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscuncot_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu.nc.ot %v0, %v1, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuncot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuncot_vvssl_imm_2(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscuncot_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu.nc.ot %v0, %v1, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuncot.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuncot_vvssl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vscuncot_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vscu.nc.ot %v0, %v1, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuncot.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuncot_vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4) {
+; CHECK-LABEL: vscuncot_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vscu.nc.ot %v0, %v1, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuncot.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vscuncot.vvssml(<256 x double>, <256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuncot_vvssml_imm_1(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vscuncot_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu.nc.ot %v0, %v1, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuncot.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuncot_vvssml_imm_2(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vscuncot_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu.nc.ot %v0, %v1, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuncot.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuncot_vvssml_imm_3(<256 x double> %0, <256 x double> %1, <256 x i1> %2) {
+; CHECK-LABEL: vscuncot_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vscu.nc.ot %v0, %v1, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuncot.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 0, <256 x i1> %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscuncot_vvssl_no_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscuncot_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscu.nc.ot %v0, %v1, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscuncot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 8, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscl_vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3) {
+; CHECK-LABEL: vscl_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vscl %v0, %v1, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscl.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vscl.vvssl(<256 x double>, <256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscl_vvssl_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscl_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl %v0, %v1, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscl.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscl_vvssl_imm_2(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscl_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl %v0, %v1, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscl.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscl_vvssl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vscl_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vscl %v0, %v1, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscl.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscl_vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4) {
+; CHECK-LABEL: vscl_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vscl %v0, %v1, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscl.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vscl.vvssml(<256 x double>, <256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscl_vvssml_imm_1(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vscl_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl %v0, %v1, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscl.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscl_vvssml_imm_2(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vscl_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl %v0, %v1, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscl.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscl_vvssml_imm_3(<256 x double> %0, <256 x double> %1, <256 x i1> %2) {
+; CHECK-LABEL: vscl_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vscl %v0, %v1, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscl.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 0, <256 x i1> %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vscl_vvssl_no_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vscl_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl %v0, %v1, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vscl.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 8, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclnc_vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3) {
+; CHECK-LABEL: vsclnc_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vscl.nc %v0, %v1, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclnc.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vsclnc.vvssl(<256 x double>, <256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclnc_vvssl_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vsclnc_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl.nc %v0, %v1, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclnc.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclnc_vvssl_imm_2(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vsclnc_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl.nc %v0, %v1, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclnc.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclnc_vvssl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vsclnc_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vscl.nc %v0, %v1, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclnc.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclnc_vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4) {
+; CHECK-LABEL: vsclnc_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vscl.nc %v0, %v1, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclnc.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vsclnc.vvssml(<256 x double>, <256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclnc_vvssml_imm_1(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vsclnc_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl.nc %v0, %v1, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclnc.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclnc_vvssml_imm_2(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vsclnc_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl.nc %v0, %v1, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclnc.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclnc_vvssml_imm_3(<256 x double> %0, <256 x double> %1, <256 x i1> %2) {
+; CHECK-LABEL: vsclnc_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vscl.nc %v0, %v1, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclnc.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 0, <256 x i1> %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclnc_vvssl_no_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vsclnc_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl.nc %v0, %v1, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclnc.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 8, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclot_vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3) {
+; CHECK-LABEL: vsclot_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vscl.ot %v0, %v1, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vsclot.vvssl(<256 x double>, <256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclot_vvssl_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vsclot_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl.ot %v0, %v1, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclot_vvssl_imm_2(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vsclot_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl.ot %v0, %v1, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclot.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclot_vvssl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vsclot_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vscl.ot %v0, %v1, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclot.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclot_vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4) {
+; CHECK-LABEL: vsclot_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vscl.ot %v0, %v1, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclot.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vsclot.vvssml(<256 x double>, <256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclot_vvssml_imm_1(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vsclot_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl.ot %v0, %v1, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclot.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclot_vvssml_imm_2(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vsclot_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl.ot %v0, %v1, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclot.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclot_vvssml_imm_3(<256 x double> %0, <256 x double> %1, <256 x i1> %2) {
+; CHECK-LABEL: vsclot_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vscl.ot %v0, %v1, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclot.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 0, <256 x i1> %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclot_vvssl_no_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vsclot_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl.ot %v0, %v1, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 8, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclncot_vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3) {
+; CHECK-LABEL: vsclncot_vvssl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vscl.nc.ot %v0, %v1, %s0, %s1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclncot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vsclncot.vvssl(<256 x double>, <256 x double>, i64, i64, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclncot_vvssl_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vsclncot_vvssl_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl.nc.ot %v0, %v1, %s0, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclncot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclncot_vvssl_imm_2(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vsclncot_vvssl_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl.nc.ot %v0, %v1, 8, %s0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclncot.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclncot_vvssl_imm_3(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vsclncot_vvssl_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vscl.nc.ot %v0, %v1, 8, 0
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclncot.vvssl(<256 x double> %0, <256 x double> %1, i64 8, i64 0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclncot_vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4) {
+; CHECK-LABEL: vsclncot_vvssml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vscl.nc.ot %v0, %v1, %s0, %s1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclncot.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 %3, <256 x i1> %4, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vsclncot.vvssml(<256 x double>, <256 x double>, i64, i64, <256 x i1>, i32)
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclncot_vvssml_imm_1(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vsclncot_vvssml_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl.nc.ot %v0, %v1, %s0, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclncot.vvssml(<256 x double> %0, <256 x double> %1, i64 %2, i64 0, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclncot_vvssml_imm_2(<256 x double> %0, <256 x double> %1, i64 %2, <256 x i1> %3) {
+; CHECK-LABEL: vsclncot_vvssml_imm_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl.nc.ot %v0, %v1, 8, %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclncot.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 %2, <256 x i1> %3, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclncot_vvssml_imm_3(<256 x double> %0, <256 x double> %1, <256 x i1> %2) {
+; CHECK-LABEL: vsclncot_vvssml_imm_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vscl.nc.ot %v0, %v1, 8, 0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclncot.vvssml(<256 x double> %0, <256 x double> %1, i64 8, i64 0, <256 x i1> %2, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind writeonly
+define fastcc void @vsclncot_vvssl_no_imm_1(<256 x double> %0, <256 x double> %1, i64 %2) {
+; CHECK-LABEL: vsclncot_vvssl_no_imm_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    or %s2, 8, (0)1
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vscl.nc.ot %v0, %v1, %s0, %s2
+; CHECK-NEXT:    b.l.t (, %s10)
+  tail call void @llvm.ve.vl.vsclncot.vvssl(<256 x double> %0, <256 x double> %1, i64 %2, i64 8, i32 256)
+  ret void
+}
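
For readers skimming the vscl tests above, a minimal sketch (illustrative only, not part of the patch) of the masked lower-word scatter's operand layout as these tests exercise it: the first vector operand carries the data to store, the second carries the per-element addresses, the two i64 scalars are the instruction's Sy/Sz scalar offsets (the immediates 8 and 0 in the _imm_ tests), the mask gates lanes, and the trailing i32 is the active vector length. The .nc and .ot mnemonic suffixes select the non-cacheable and overtaking variants. The function and value names below are made up for illustration.

; Sketch: scatter the low 32 bits of each active %data element to the
; address given by the corresponding %addr element (both scalar offsets
; are zero here), over 256 lanes gated by %mask.
define void @vscl_sketch(<256 x double> %data, <256 x double> %addr, <256 x i1> %mask) {
  tail call void @llvm.ve.vl.vscl.vvssml(<256 x double> %data, <256 x double> %addr, i64 0, i64 0, <256 x i1> %mask, i32 256)
  ret void
}

declare void @llvm.ve.vl.vscl.vvssml(<256 x double>, <256 x double>, i64, i64, <256 x i1>, i32)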