[llvm] 8e7ceff - [RISCV] Fix crash when inserting large fixed-length subvectors

Fraser Cormack via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 4 01:33:39 PST 2021


Author: Fraser Cormack
Date: 2021-03-04T09:27:16Z
New Revision: 8e7ceffd0b78b17e92fb9e8b3fda6aa6e122eb87

URL: https://github.com/llvm/llvm-project/commit/8e7ceffd0b78b17e92fb9e8b3fda6aa6e122eb87
DIFF: https://github.com/llvm/llvm-project/commit/8e7ceffd0b78b17e92fb9e8b3fda6aa6e122eb87.diff

LOG: [RISCV] Fix crash when inserting large fixed-length subvectors

This patch addresses a compiler crash resulting from passing a
fixed-length vector type to RISCVTargetLowering::getLMUL, which expects
scalable vector types. An assertion was added to prevent this from
regressing in the future.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D97868
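
A minimal reproducer, lifted from the test added in this commit, is the
<16 x i64> splat-and-add below. Under an LMUL max of 8 it is legalized by
inserting a fixed-length subvector into the scalable container type
nxv8i64 at index 0, which is the path that previously handed the
fixed-length type to getLMUL:

  ; Compile with (mirrors the new LMULMAX8 RUN line):
  ;   llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 \
  ;       -riscv-v-fixed-length-vector-lmul-max=8 < %s
  define void @vadd_vx_v16i64(<16 x i64>* %a, i64 %b, <16 x i64>* %c) {
    %va = load <16 x i64>, <16 x i64>* %a
    %head = insertelement <16 x i64> undef, i64 %b, i32 0
    %splat = shufflevector <16 x i64> %head, <16 x i64> undef, <16 x i32> zeroinitializer
    %vc = add <16 x i64> %va, %splat
    store <16 x i64> %vc, <16 x i64>* %c
    ret void
  }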

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat-rv32.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 552495cc864c..1a5d834b3164 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -966,7 +966,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     if (Idx != 0)
       break;
 
-    RISCVVLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
+    RISCVVLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
     bool IsSubVecPartReg = SubVecLMUL == RISCVVLMUL::LMUL_F2 ||
                            SubVecLMUL == RISCVVLMUL::LMUL_F4 ||
                            SubVecLMUL == RISCVVLMUL::LMUL_F8;

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d1a3b8d6f4f8..81c118a2e40d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -837,6 +837,7 @@ static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
 }
 
 RISCVVLMUL RISCVTargetLowering::getLMUL(MVT VT) {
+  assert(VT.isScalableVector() && "Expecting a scalable vector type");
   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
   if (VT.getVectorElementType() == MVT::i1)
     KnownSize *= 8;

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat-rv32.ll
index 2dc59a23f1fc..89cb92bd7ff9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat-rv32.ll
@@ -1,4 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
 
@@ -50,6 +51,14 @@ define void @splat_v4i32(<4 x i32>* %x, i32 %y) {
 ;}
 
 define void @splat_v32i8(<32 x i8>* %x, i8 %y) {
+; LMULMAX8-LABEL: splat_v32i8:
+; LMULMAX8:       # %bb.0:
+; LMULMAX8-NEXT:    addi a2, zero, 32
+; LMULMAX8-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; LMULMAX8-NEXT:    vmv.v.x v26, a1
+; LMULMAX8-NEXT:    vse8.v v26, (a0)
+; LMULMAX8-NEXT:    ret
+;
 ; LMULMAX2-LABEL: splat_v32i8:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    addi a2, zero, 32
@@ -73,6 +82,13 @@ define void @splat_v32i8(<32 x i8>* %x, i8 %y) {
 }
 
 define void @splat_v16i16(<16 x i16>* %x, i16 %y) {
+; LMULMAX8-LABEL: splat_v16i16:
+; LMULMAX8:       # %bb.0:
+; LMULMAX8-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
+; LMULMAX8-NEXT:    vmv.v.x v26, a1
+; LMULMAX8-NEXT:    vse16.v v26, (a0)
+; LMULMAX8-NEXT:    ret
+;
 ; LMULMAX2-LABEL: splat_v16i16:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
@@ -95,6 +111,13 @@ define void @splat_v16i16(<16 x i16>* %x, i16 %y) {
 }
 
 define void @splat_v8i32(<8 x i32>* %x, i32 %y) {
+; LMULMAX8-LABEL: splat_v8i32:
+; LMULMAX8:       # %bb.0:
+; LMULMAX8-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
+; LMULMAX8-NEXT:    vmv.v.x v26, a1
+; LMULMAX8-NEXT:    vse32.v v26, (a0)
+; LMULMAX8-NEXT:    ret
+;
 ; LMULMAX2-LABEL: splat_v8i32:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
@@ -177,6 +200,14 @@ define void @splat_zero_v2i64(<2 x i64>* %x) {
 }
 
 define void @splat_zero_v32i8(<32 x i8>* %x) {
+; LMULMAX8-LABEL: splat_zero_v32i8:
+; LMULMAX8:       # %bb.0:
+; LMULMAX8-NEXT:    addi a1, zero, 32
+; LMULMAX8-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; LMULMAX8-NEXT:    vmv.v.i v26, 0
+; LMULMAX8-NEXT:    vse8.v v26, (a0)
+; LMULMAX8-NEXT:    ret
+;
 ; LMULMAX2-LABEL: splat_zero_v32i8:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    addi a1, zero, 32
@@ -200,6 +231,13 @@ define void @splat_zero_v32i8(<32 x i8>* %x) {
 }
 
 define void @splat_zero_v16i16(<16 x i16>* %x) {
+; LMULMAX8-LABEL: splat_zero_v16i16:
+; LMULMAX8:       # %bb.0:
+; LMULMAX8-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
+; LMULMAX8-NEXT:    vmv.v.i v26, 0
+; LMULMAX8-NEXT:    vse16.v v26, (a0)
+; LMULMAX8-NEXT:    ret
+;
 ; LMULMAX2-LABEL: splat_zero_v16i16:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
@@ -222,6 +260,13 @@ define void @splat_zero_v16i16(<16 x i16>* %x) {
 }
 
 define void @splat_zero_v8i32(<8 x i32>* %x) {
+; LMULMAX8-LABEL: splat_zero_v8i32:
+; LMULMAX8:       # %bb.0:
+; LMULMAX8-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
+; LMULMAX8-NEXT:    vmv.v.i v26, 0
+; LMULMAX8-NEXT:    vse32.v v26, (a0)
+; LMULMAX8-NEXT:    ret
+;
 ; LMULMAX2-LABEL: splat_zero_v8i32:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
@@ -244,6 +289,13 @@ define void @splat_zero_v8i32(<8 x i32>* %x) {
 }
 
 define void @splat_zero_v4i64(<4 x i64>* %x) {
+; LMULMAX8-LABEL: splat_zero_v4i64:
+; LMULMAX8:       # %bb.0:
+; LMULMAX8-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
+; LMULMAX8-NEXT:    vmv.v.i v26, 0
+; LMULMAX8-NEXT:    vse32.v v26, (a0)
+; LMULMAX8-NEXT:    ret
+;
 ; LMULMAX2-LABEL: splat_zero_v4i64:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
@@ -318,6 +370,14 @@ define void @splat_allones_v2i64(<2 x i64>* %x) {
 }
 
 define void @splat_allones_v32i8(<32 x i8>* %x) {
+; LMULMAX8-LABEL: splat_allones_v32i8:
+; LMULMAX8:       # %bb.0:
+; LMULMAX8-NEXT:    addi a1, zero, 32
+; LMULMAX8-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; LMULMAX8-NEXT:    vmv.v.i v26, -1
+; LMULMAX8-NEXT:    vse8.v v26, (a0)
+; LMULMAX8-NEXT:    ret
+;
 ; LMULMAX2-LABEL: splat_allones_v32i8:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    addi a1, zero, 32
@@ -341,6 +401,13 @@ define void @splat_allones_v32i8(<32 x i8>* %x) {
 }
 
 define void @splat_allones_v16i16(<16 x i16>* %x) {
+; LMULMAX8-LABEL: splat_allones_v16i16:
+; LMULMAX8:       # %bb.0:
+; LMULMAX8-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
+; LMULMAX8-NEXT:    vmv.v.i v26, -1
+; LMULMAX8-NEXT:    vse16.v v26, (a0)
+; LMULMAX8-NEXT:    ret
+;
 ; LMULMAX2-LABEL: splat_allones_v16i16:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
@@ -363,6 +430,13 @@ define void @splat_allones_v16i16(<16 x i16>* %x) {
 }
 
 define void @splat_allones_v8i32(<8 x i32>* %x) {
+; LMULMAX8-LABEL: splat_allones_v8i32:
+; LMULMAX8:       # %bb.0:
+; LMULMAX8-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
+; LMULMAX8-NEXT:    vmv.v.i v26, -1
+; LMULMAX8-NEXT:    vse32.v v26, (a0)
+; LMULMAX8-NEXT:    ret
+;
 ; LMULMAX2-LABEL: splat_allones_v8i32:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
@@ -385,6 +459,13 @@ define void @splat_allones_v8i32(<8 x i32>* %x) {
 }
 
 define void @splat_allones_v4i64(<4 x i64>* %x) {
+; LMULMAX8-LABEL: splat_allones_v4i64:
+; LMULMAX8:       # %bb.0:
+; LMULMAX8-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
+; LMULMAX8-NEXT:    vmv.v.i v26, -1
+; LMULMAX8-NEXT:    vse32.v v26, (a0)
+; LMULMAX8-NEXT:    ret
+;
 ; LMULMAX2-LABEL: splat_allones_v4i64:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
@@ -411,6 +492,17 @@ define void @splat_allones_v4i64(<4 x i64>* %x) {
 ; FIXME: We should prevent this and use the implicit sign extension of vmv.v.x
 ; with SEW=64 on RV32.
 define void @splat_allones_with_use_v4i64(<4 x i64>* %x) {
+; LMULMAX8-LABEL: splat_allones_with_use_v4i64:
+; LMULMAX8:       # %bb.0:
+; LMULMAX8-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
+; LMULMAX8-NEXT:    vle64.v v26, (a0)
+; LMULMAX8-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
+; LMULMAX8-NEXT:    vmv.v.i v28, -1
+; LMULMAX8-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
+; LMULMAX8-NEXT:    vadd.vv v26, v26, v28
+; LMULMAX8-NEXT:    vse64.v v26, (a0)
+; LMULMAX8-NEXT:    ret
+;
 ; LMULMAX2-LABEL: splat_allones_with_use_v4i64:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
@@ -441,3 +533,172 @@ define void @splat_allones_with_use_v4i64(<4 x i64>* %x) {
   store <4 x i64> %b, <4 x i64>* %x
   ret void
 }
+
+; This test used to crash at LMUL=8 when inserting a v16i64 subvector into
+; nxv8i64 at index 0: the fixed-length v16i64 type was used to get the LMUL,
+; and its size exceeded the maximum expected size of 512. The scalable
+; container type nxv8i64 should have been used instead.
+define void @vadd_vx_v16i64(<16 x i64>* %a, i64 %b, <16 x i64>* %c) {
+; LMULMAX8-LABEL: vadd_vx_v16i64:
+; LMULMAX8:       # %bb.0:
+; LMULMAX8-NEXT:    addi sp, sp, -256
+; LMULMAX8-NEXT:    .cfi_def_cfa_offset 256
+; LMULMAX8-NEXT:    sw ra, 252(sp) # 4-byte Folded Spill
+; LMULMAX8-NEXT:    sw s0, 248(sp) # 4-byte Folded Spill
+; LMULMAX8-NEXT:    .cfi_offset ra, -4
+; LMULMAX8-NEXT:    .cfi_offset s0, -8
+; LMULMAX8-NEXT:    addi s0, sp, 256
+; LMULMAX8-NEXT:    .cfi_def_cfa s0, 0
+; LMULMAX8-NEXT:    andi sp, sp, -128
+; LMULMAX8-NEXT:    vsetivli a4, 16, e64,m8,ta,mu
+; LMULMAX8-NEXT:    vle64.v v8, (a0)
+; LMULMAX8-NEXT:    sw a2, 124(sp)
+; LMULMAX8-NEXT:    sw a1, 120(sp)
+; LMULMAX8-NEXT:    sw a2, 116(sp)
+; LMULMAX8-NEXT:    sw a1, 112(sp)
+; LMULMAX8-NEXT:    sw a2, 108(sp)
+; LMULMAX8-NEXT:    sw a1, 104(sp)
+; LMULMAX8-NEXT:    sw a2, 100(sp)
+; LMULMAX8-NEXT:    sw a1, 96(sp)
+; LMULMAX8-NEXT:    sw a2, 92(sp)
+; LMULMAX8-NEXT:    sw a1, 88(sp)
+; LMULMAX8-NEXT:    sw a2, 84(sp)
+; LMULMAX8-NEXT:    sw a1, 80(sp)
+; LMULMAX8-NEXT:    sw a2, 76(sp)
+; LMULMAX8-NEXT:    sw a1, 72(sp)
+; LMULMAX8-NEXT:    sw a2, 68(sp)
+; LMULMAX8-NEXT:    sw a1, 64(sp)
+; LMULMAX8-NEXT:    sw a2, 60(sp)
+; LMULMAX8-NEXT:    sw a1, 56(sp)
+; LMULMAX8-NEXT:    sw a2, 52(sp)
+; LMULMAX8-NEXT:    sw a1, 48(sp)
+; LMULMAX8-NEXT:    sw a2, 44(sp)
+; LMULMAX8-NEXT:    sw a1, 40(sp)
+; LMULMAX8-NEXT:    sw a2, 36(sp)
+; LMULMAX8-NEXT:    sw a1, 32(sp)
+; LMULMAX8-NEXT:    sw a2, 28(sp)
+; LMULMAX8-NEXT:    sw a1, 24(sp)
+; LMULMAX8-NEXT:    sw a2, 20(sp)
+; LMULMAX8-NEXT:    sw a1, 16(sp)
+; LMULMAX8-NEXT:    sw a2, 12(sp)
+; LMULMAX8-NEXT:    sw a1, 8(sp)
+; LMULMAX8-NEXT:    sw a2, 4(sp)
+; LMULMAX8-NEXT:    sw a1, 0(sp)
+; LMULMAX8-NEXT:    addi a0, zero, 32
+; LMULMAX8-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; LMULMAX8-NEXT:    vle32.v v16, (sp)
+; LMULMAX8-NEXT:    vsetivli a0, 16, e64,m8,ta,mu
+; LMULMAX8-NEXT:    vadd.vv v8, v8, v16
+; LMULMAX8-NEXT:    vse64.v v8, (a3)
+; LMULMAX8-NEXT:    addi sp, s0, -256
+; LMULMAX8-NEXT:    lw s0, 248(sp) # 4-byte Folded Reload
+; LMULMAX8-NEXT:    lw ra, 252(sp) # 4-byte Folded Reload
+; LMULMAX8-NEXT:    addi sp, sp, 256
+; LMULMAX8-NEXT:    ret
+;
+; LMULMAX2-LABEL: vadd_vx_v16i64:
+; LMULMAX2:       # %bb.0:
+; LMULMAX2-NEXT:    addi sp, sp, -64
+; LMULMAX2-NEXT:    .cfi_def_cfa_offset 64
+; LMULMAX2-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; LMULMAX2-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
+; LMULMAX2-NEXT:    .cfi_offset ra, -4
+; LMULMAX2-NEXT:    .cfi_offset s0, -8
+; LMULMAX2-NEXT:    addi s0, sp, 64
+; LMULMAX2-NEXT:    .cfi_def_cfa s0, 0
+; LMULMAX2-NEXT:    andi sp, sp, -32
+; LMULMAX2-NEXT:    addi a4, a0, 64
+; LMULMAX2-NEXT:    vsetivli a5, 4, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vle64.v v26, (a4)
+; LMULMAX2-NEXT:    addi a4, a0, 96
+; LMULMAX2-NEXT:    vle64.v v28, (a4)
+; LMULMAX2-NEXT:    vle64.v v30, (a0)
+; LMULMAX2-NEXT:    addi a0, a0, 32
+; LMULMAX2-NEXT:    vle64.v v8, (a0)
+; LMULMAX2-NEXT:    sw a2, 28(sp)
+; LMULMAX2-NEXT:    sw a1, 24(sp)
+; LMULMAX2-NEXT:    sw a2, 20(sp)
+; LMULMAX2-NEXT:    sw a1, 16(sp)
+; LMULMAX2-NEXT:    sw a2, 12(sp)
+; LMULMAX2-NEXT:    sw a1, 8(sp)
+; LMULMAX2-NEXT:    sw a2, 4(sp)
+; LMULMAX2-NEXT:    sw a1, 0(sp)
+; LMULMAX2-NEXT:    vsetivli a0, 8, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vle32.v v10, (sp)
+; LMULMAX2-NEXT:    vsetivli a0, 4, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vadd.vv v8, v8, v10
+; LMULMAX2-NEXT:    vadd.vv v30, v30, v10
+; LMULMAX2-NEXT:    vadd.vv v28, v28, v10
+; LMULMAX2-NEXT:    vadd.vv v26, v26, v10
+; LMULMAX2-NEXT:    addi a0, a3, 64
+; LMULMAX2-NEXT:    vse64.v v26, (a0)
+; LMULMAX2-NEXT:    addi a0, a3, 96
+; LMULMAX2-NEXT:    vse64.v v28, (a0)
+; LMULMAX2-NEXT:    vse64.v v30, (a3)
+; LMULMAX2-NEXT:    addi a0, a3, 32
+; LMULMAX2-NEXT:    vse64.v v8, (a0)
+; LMULMAX2-NEXT:    addi sp, s0, -64
+; LMULMAX2-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; LMULMAX2-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; LMULMAX2-NEXT:    addi sp, sp, 64
+; LMULMAX2-NEXT:    ret
+;
+; LMULMAX1-LABEL: vadd_vx_v16i64:
+; LMULMAX1:       # %bb.0:
+; LMULMAX1-NEXT:    addi sp, sp, -16
+; LMULMAX1-NEXT:    .cfi_def_cfa_offset 16
+; LMULMAX1-NEXT:    addi a4, a0, 96
+; LMULMAX1-NEXT:    vsetivli a5, 2, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vle64.v v25, (a4)
+; LMULMAX1-NEXT:    addi a4, a0, 112
+; LMULMAX1-NEXT:    vle64.v v26, (a4)
+; LMULMAX1-NEXT:    addi a4, a0, 64
+; LMULMAX1-NEXT:    vle64.v v27, (a4)
+; LMULMAX1-NEXT:    addi a4, a0, 80
+; LMULMAX1-NEXT:    vle64.v v28, (a4)
+; LMULMAX1-NEXT:    addi a4, a0, 32
+; LMULMAX1-NEXT:    vle64.v v29, (a4)
+; LMULMAX1-NEXT:    addi a4, a0, 48
+; LMULMAX1-NEXT:    vle64.v v30, (a4)
+; LMULMAX1-NEXT:    vle64.v v31, (a0)
+; LMULMAX1-NEXT:    addi a0, a0, 16
+; LMULMAX1-NEXT:    vle64.v v8, (a0)
+; LMULMAX1-NEXT:    sw a2, 12(sp)
+; LMULMAX1-NEXT:    sw a1, 8(sp)
+; LMULMAX1-NEXT:    sw a2, 4(sp)
+; LMULMAX1-NEXT:    sw a1, 0(sp)
+; LMULMAX1-NEXT:    vsetivli a0, 4, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vle32.v v9, (sp)
+; LMULMAX1-NEXT:    vsetivli a0, 2, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vadd.vv v8, v8, v9
+; LMULMAX1-NEXT:    vadd.vv v31, v31, v9
+; LMULMAX1-NEXT:    vadd.vv v30, v30, v9
+; LMULMAX1-NEXT:    vadd.vv v29, v29, v9
+; LMULMAX1-NEXT:    vadd.vv v28, v28, v9
+; LMULMAX1-NEXT:    vadd.vv v27, v27, v9
+; LMULMAX1-NEXT:    vadd.vv v26, v26, v9
+; LMULMAX1-NEXT:    vadd.vv v25, v25, v9
+; LMULMAX1-NEXT:    addi a0, a3, 96
+; LMULMAX1-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-NEXT:    addi a0, a3, 112
+; LMULMAX1-NEXT:    vse64.v v26, (a0)
+; LMULMAX1-NEXT:    addi a0, a3, 64
+; LMULMAX1-NEXT:    vse64.v v27, (a0)
+; LMULMAX1-NEXT:    addi a0, a3, 80
+; LMULMAX1-NEXT:    vse64.v v28, (a0)
+; LMULMAX1-NEXT:    addi a0, a3, 32
+; LMULMAX1-NEXT:    vse64.v v29, (a0)
+; LMULMAX1-NEXT:    addi a0, a3, 48
+; LMULMAX1-NEXT:    vse64.v v30, (a0)
+; LMULMAX1-NEXT:    vse64.v v31, (a3)
+; LMULMAX1-NEXT:    addi a0, a3, 16
+; LMULMAX1-NEXT:    vse64.v v8, (a0)
+; LMULMAX1-NEXT:    addi sp, sp, 16
+; LMULMAX1-NEXT:    ret
+  %va = load <16 x i64>, <16 x i64>* %a
+  %head = insertelement <16 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <16 x i64> %head, <16 x i64> undef, <16 x i32> zeroinitializer
+  %vc = add <16 x i64> %va, %splat
+  store <16 x i64> %vc, <16 x i64>* %c
+  ret void
+}
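
To put numbers on the comment above the new test: a fixed-length v16i64 is
16 x 64 = 1024 bits, beyond the maximum expected size of 512 bits in
getLMUL's size-to-LMUL mapping, hence the crash; the scalable container
nxv8i64 has a known minimum size of 8 x 64 = 512 bits and maps to LMUL_8
as intended.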

