[llvm] 85ae96d - [RISCV] Add v2i64 _vi_ and _iv_ test cases to fixed-vectors-int.ll since we no longer crash.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 10 19:20:40 PST 2021


Author: Craig Topper
Date: 2021-03-10T19:19:47-08:00
New Revision: 85ae96d8b2629a16857f4619f0c859df74c80878

URL: https://github.com/llvm/llvm-project/commit/85ae96d8b2629a16857f4619f0c859df74c80878
DIFF: https://github.com/llvm/llvm-project/commit/85ae96d8b2629a16857f4619f0c859df74c80878.diff

LOG: [RISCV] Add v2i64 _vi_ and _iv_ test cases to fixed-vectors-int.ll since we no longer crash.

I think we were missing some build_vector or other support and
skipped these test cases. They work now but don't generate
optimal code.

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index 35ac5e212793..12bfc90ca758 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -5083,6 +5083,33 @@ define void @add_vi_v4i32(<4 x i32>* %x) {
   ret void
 }
 
+define void @add_vi_v2i64(<2 x i64>* %x) {
+; LMULMAX1-RV32-LABEL: add_vi_v2i64:
+; LMULMAX1-RV32:       # %bb.0:
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vmv.v.i v26, -1
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vadd.vv v25, v25, v26
+; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    ret
+;
+; LMULMAX1-RV64-LABEL: add_vi_v2i64:
+; LMULMAX1-RV64:       # %bb.0:
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    vadd.vi v25, v25, -1
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = insertelement <2 x i64> undef, i64 -1, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = add <2 x i64> %a, %c
+  store <2 x i64> %d, <2 x i64>* %x
+  ret void
+}
+
 define void @add_iv_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: add_iv_v16i8:
 ; CHECK:       # %bb.0:
@@ -5131,6 +5158,35 @@ define void @add_iv_v4i32(<4 x i32>* %x) {
   ret void
 }
 
+define void @add_iv_v2i64(<2 x i64>* %x) {
+; LMULMAX1-RV32-LABEL: add_iv_v2i64:
+; LMULMAX1-RV32:       # %bb.0:
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI160_0)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI160_0)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vadd.vv v25, v25, v26
+; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    ret
+;
+; LMULMAX1-RV64-LABEL: add_iv_v2i64:
+; LMULMAX1-RV64:       # %bb.0:
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    vadd.vi v25, v25, 1
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = insertelement <2 x i64> undef, i64 1, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = add <2 x i64> %c, %a
+  store <2 x i64> %d, <2 x i64>* %x
+  ret void
+}
+
 define void @add_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: add_vx_v16i8:
 ; CHECK:       # %bb.0:
@@ -5278,6 +5334,34 @@ define void @sub_vi_v4i32(<4 x i32>* %x) {
   ret void
 }
 
+define void @sub_vi_v2i64(<2 x i64>* %x) {
+; LMULMAX1-RV32-LABEL: sub_vi_v2i64:
+; LMULMAX1-RV32:       # %bb.0:
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vmv.v.i v26, -1
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsub.vv v25, v25, v26
+; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    ret
+;
+; LMULMAX1-RV64-LABEL: sub_vi_v2i64:
+; LMULMAX1-RV64:       # %bb.0:
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    addi a1, zero, -1
+; LMULMAX1-RV64-NEXT:    vsub.vx v25, v25, a1
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = insertelement <2 x i64> undef, i64 -1, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = sub <2 x i64> %a, %c
+  store <2 x i64> %d, <2 x i64>* %x
+  ret void
+}
+
 define void @sub_iv_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: sub_iv_v16i8:
 ; CHECK:       # %bb.0:
@@ -5326,6 +5410,35 @@ define void @sub_iv_v4i32(<4 x i32>* %x) {
   ret void
 }
 
+define void @sub_iv_v2i64(<2 x i64>* %x) {
+; LMULMAX1-RV32-LABEL: sub_iv_v2i64:
+; LMULMAX1-RV32:       # %bb.0:
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI174_0)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI174_0)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsub.vv v25, v26, v25
+; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    ret
+;
+; LMULMAX1-RV64-LABEL: sub_iv_v2i64:
+; LMULMAX1-RV64:       # %bb.0:
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    vrsub.vi v25, v25, 1
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = insertelement <2 x i64> undef, i64 1, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = sub <2 x i64> %c, %a
+  store <2 x i64> %d, <2 x i64>* %x
+  ret void
+}
+
 define void @sub_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: sub_vx_v16i8:
 ; CHECK:       # %bb.0:
@@ -5566,6 +5679,35 @@ define void @and_vi_v4i32(<4 x i32>* %x) {
   ret void
 }
 
+define void @and_vi_v2i64(<2 x i64>* %x) {
+; LMULMAX1-RV32-LABEL: and_vi_v2i64:
+; LMULMAX1-RV32:       # %bb.0:
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI190_0)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI190_0)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vand.vv v25, v25, v26
+; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    ret
+;
+; LMULMAX1-RV64-LABEL: and_vi_v2i64:
+; LMULMAX1-RV64:       # %bb.0:
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    vand.vi v25, v25, -2
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = insertelement <2 x i64> undef, i64 -2, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = and <2 x i64> %a, %c
+  store <2 x i64> %d, <2 x i64>* %x
+  ret void
+}
+
 define void @and_iv_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: and_iv_v16i8:
 ; CHECK:       # %bb.0:
@@ -5614,6 +5756,35 @@ define void @and_iv_v4i32(<4 x i32>* %x) {
   ret void
 }
 
+define void @and_iv_v2i64(<2 x i64>* %x) {
+; LMULMAX1-RV32-LABEL: and_iv_v2i64:
+; LMULMAX1-RV32:       # %bb.0:
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI194_0)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI194_0)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vand.vv v25, v25, v26
+; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    ret
+;
+; LMULMAX1-RV64-LABEL: and_iv_v2i64:
+; LMULMAX1-RV64:       # %bb.0:
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    vand.vi v25, v25, 1
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = insertelement <2 x i64> undef, i64 1, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = and <2 x i64> %c, %a
+  store <2 x i64> %d, <2 x i64>* %x
+  ret void
+}
+
 define void @and_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: and_vx_v16i8:
 ; CHECK:       # %bb.0:
@@ -5758,6 +5929,35 @@ define void @or_vi_v4i32(<4 x i32>* %x) {
   ret void
 }
 
+define void @or_vi_v2i64(<2 x i64>* %x) {
+; LMULMAX1-RV32-LABEL: or_vi_v2i64:
+; LMULMAX1-RV32:       # %bb.0:
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI204_0)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI204_0)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vor.vv v25, v25, v26
+; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    ret
+;
+; LMULMAX1-RV64-LABEL: or_vi_v2i64:
+; LMULMAX1-RV64:       # %bb.0:
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    vor.vi v25, v25, -2
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = insertelement <2 x i64> undef, i64 -2, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = or <2 x i64> %a, %c
+  store <2 x i64> %d, <2 x i64>* %x
+  ret void
+}
+
 define void @or_iv_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: or_iv_v16i8:
 ; CHECK:       # %bb.0:
@@ -5806,6 +6006,35 @@ define void @or_iv_v4i32(<4 x i32>* %x) {
   ret void
 }
 
+define void @or_iv_v2i64(<2 x i64>* %x) {
+; LMULMAX1-RV32-LABEL: or_iv_v2i64:
+; LMULMAX1-RV32:       # %bb.0:
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI208_0)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI208_0)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vor.vv v25, v25, v26
+; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    ret
+;
+; LMULMAX1-RV64-LABEL: or_iv_v2i64:
+; LMULMAX1-RV64:       # %bb.0:
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    vor.vi v25, v25, 1
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = insertelement <2 x i64> undef, i64 1, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = or <2 x i64> %c, %a
+  store <2 x i64> %d, <2 x i64>* %x
+  ret void
+}
+
 define void @or_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: or_vx_v16i8:
 ; CHECK:       # %bb.0:
@@ -5950,6 +6179,33 @@ define void @xor_vi_v4i32(<4 x i32>* %x) {
   ret void
 }
 
+define void @xor_vi_v2i64(<2 x i64>* %x) {
+; LMULMAX1-RV32-LABEL: xor_vi_v2i64:
+; LMULMAX1-RV32:       # %bb.0:
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vmv.v.i v26, -1
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vxor.vv v25, v25, v26
+; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    ret
+;
+; LMULMAX1-RV64-LABEL: xor_vi_v2i64:
+; LMULMAX1-RV64:       # %bb.0:
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    vxor.vi v25, v25, -1
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = insertelement <2 x i64> undef, i64 -1, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = xor <2 x i64> %a, %c
+  store <2 x i64> %d, <2 x i64>* %x
+  ret void
+}
+
 define void @xor_iv_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: xor_iv_v16i8:
 ; CHECK:       # %bb.0:
@@ -5998,6 +6254,35 @@ define void @xor_iv_v4i32(<4 x i32>* %x) {
   ret void
 }
 
+define void @xor_iv_v2i64(<2 x i64>* %x) {
+; LMULMAX1-RV32-LABEL: xor_iv_v2i64:
+; LMULMAX1-RV32:       # %bb.0:
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI222_0)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI222_0)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vxor.vv v25, v25, v26
+; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    ret
+;
+; LMULMAX1-RV64-LABEL: xor_iv_v2i64:
+; LMULMAX1-RV64:       # %bb.0:
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    vxor.vi v25, v25, 1
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = insertelement <2 x i64> undef, i64 1, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = xor <2 x i64> %c, %a
+  store <2 x i64> %d, <2 x i64>* %x
+  ret void
+}
+
 define void @xor_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: xor_vx_v16i8:
 ; CHECK:       # %bb.0:
@@ -6142,6 +6427,35 @@ define void @lshr_vi_v4i32(<4 x i32>* %x) {
   ret void
 }
 
+define void @lshr_vi_v2i64(<2 x i64>* %x) {
+; LMULMAX1-RV32-LABEL: lshr_vi_v2i64:
+; LMULMAX1-RV32:       # %bb.0:
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI232_0)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI232_0)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsrl.vv v25, v25, v26
+; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    ret
+;
+; LMULMAX1-RV64-LABEL: lshr_vi_v2i64:
+; LMULMAX1-RV64:       # %bb.0:
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    vsrl.vi v25, v25, 31
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = insertelement <2 x i64> undef, i64 31, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = lshr <2 x i64> %a, %c
+  store <2 x i64> %d, <2 x i64>* %x
+  ret void
+}
+
 define void @lshr_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: lshr_vx_v16i8:
 ; CHECK:       # %bb.0:
@@ -6238,6 +6552,35 @@ define void @ashr_vi_v4i32(<4 x i32>* %x) {
   ret void
 }
 
+define void @ashr_vi_v2i64(<2 x i64>* %x) {
+; LMULMAX1-RV32-LABEL: ashr_vi_v2i64:
+; LMULMAX1-RV32:       # %bb.0:
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI239_0)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI239_0)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsra.vv v25, v25, v26
+; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    ret
+;
+; LMULMAX1-RV64-LABEL: ashr_vi_v2i64:
+; LMULMAX1-RV64:       # %bb.0:
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    vsra.vi v25, v25, 31
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = insertelement <2 x i64> undef, i64 31, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = ashr <2 x i64> %a, %c
+  store <2 x i64> %d, <2 x i64>* %x
+  ret void
+}
+
 define void @ashr_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: ashr_vx_v16i8:
 ; CHECK:       # %bb.0:
@@ -6334,6 +6677,35 @@ define void @shl_vi_v4i32(<4 x i32>* %x) {
   ret void
 }
 
+define void @shl_vi_v2i64(<2 x i64>* %x) {
+; LMULMAX1-RV32-LABEL: shl_vi_v2i64:
+; LMULMAX1-RV32:       # %bb.0:
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI246_0)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI246_0)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsll.vv v25, v25, v26
+; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    ret
+;
+; LMULMAX1-RV64-LABEL: shl_vi_v2i64:
+; LMULMAX1-RV64:       # %bb.0:
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    vsll.vi v25, v25, 31
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = insertelement <2 x i64> undef, i64 31, i32 0
+  %c = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
+  %d = shl <2 x i64> %a, %c
+  store <2 x i64> %d, <2 x i64>* %x
+  ret void
+}
+
 define void @shl_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: shl_vx_v16i8:
 ; CHECK:       # %bb.0:
@@ -6657,14 +7029,14 @@ define void @mulhu_vx_v2i64(<2 x i64>* %x) {
 ; LMULMAX1-RV32:       # %bb.0:
 ; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
-; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI252_0)
-; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI252_0)
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI265_0)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI265_0)
 ; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
 ; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vmulhu.vv v25, v25, v26
-; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI252_1)
-; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI252_1)
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI265_1)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI265_1)
 ; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
 ; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
@@ -6779,14 +7151,14 @@ define void @mulhs_vx_v2i64(<2 x i64>* %x) {
 ; LMULMAX1-RV32:       # %bb.0:
 ; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
-; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI256_0)
-; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI256_0)
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI269_0)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI269_0)
 ; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
 ; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vmulh.vv v25, v25, v26
-; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI256_1)
-; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI256_1)
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI269_1)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI269_1)
 ; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
 ; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu


        


More information about the llvm-commits mailing list