[llvm] 0b55f7c - [RISCV] Fix interleave crash on unary interleaves

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 25 01:18:56 PDT 2023


Author: Luke Lau
Date: 2023-04-25T09:18:50+01:00
New Revision: 0b55f7c5c69ff2a2776a03f32ad31bedc556faf3

URL: https://github.com/llvm/llvm-project/commit/0b55f7c5c69ff2a2776a03f32ad31bedc556faf3
DIFF: https://github.com/llvm/llvm-project/commit/0b55f7c5c69ff2a2776a03f32ad31bedc556faf3.diff

LOG: [RISCV] Fix interleave crash on unary interleaves

We were crashing when lowering interleave shuffles like
  (shuffle <0,3,1,4>, x:v4i8, y:v4i8)
where the shuffle is technically unary (both EvenSrc and OddSrc point
into the first operand), but the resulting extract_subvectors were out
of bounds. This patch checks that the subvectors being extracted are
within range before matching the shuffle as an interleave.
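
For reference, here is how the new bounds check plays out on the
reproducer above (the EvenSrc/OddSrc values are inferred from the mask
and shown purely for illustration):

  <4 x i8> with mask <0,3,1,4>               =>  NumElts = 4, HalfNumElts = 2
  even mask elements {0,1} start at index 0  =>  EvenSrc = 0
  odd  mask elements {3,4} start at index 3  =>  OddSrc  = 3
  EvenSrc: (0 % 4) + 2 = 2 <= 4  ->  in range
  OddSrc:  (3 % 4) + 2 = 5 >  4  ->  an extract_subvector of 2 elements at
                                     offset 3 would read past the end of the
                                     source, so isInterleaveShuffle now
                                     returns false for this mask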

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D148647

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 1f6c59309a43..47d933515898 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3269,9 +3269,9 @@ static bool isDeinterleaveShuffle(MVT VT, MVT ContainerVT, SDValue V1,
 
 /// Is this shuffle interleaving contiguous elements from one vector into the
 /// even elements and contiguous elements from another vector into the odd
-/// elements. \p Src1 will contain the element that should be in the first even
-/// element. \p Src2 will contain the element that should be in the first odd
-/// element. These can be the first element in a source or the element half
+/// elements. \p EvenSrc will contain the element that should be in the first
+/// even element. \p OddSrc will contain the element that should be in the first
+/// odd element. These can be the first element in a source or the element half
 /// way through the source.
 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, int &EvenSrc,
                                 int &OddSrc, const RISCVSubtarget &Subtarget) {
@@ -3280,7 +3280,8 @@ static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, int &EvenSrc,
     return false;
 
   int Size = Mask.size();
-  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
+  int NumElts = VT.getVectorNumElements();
+  assert(Size == (int)NumElts && "Unexpected mask size");
 
   SmallVector<unsigned, 2> StartIndexes;
   if (!ShuffleVectorInst::isInterleaveMask(Mask, 2, Size * 2, StartIndexes))
@@ -3293,7 +3294,14 @@ static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, int &EvenSrc,
   if (EvenSrc != 0 && OddSrc != 0)
     return false;
 
-  return true;
+  // The subvectors will be extracted either from the start of the two input
+  // vectors, or from the start and middle of the first vector if it's a unary
+  // interleave.
+  // In both cases we extract HalfNumElts elements, so make sure that
+  // EvenSrc/OddSrc are within range.
+  int HalfNumElts = NumElts / 2;
+  return (((EvenSrc % NumElts) + HalfNumElts) <= NumElts) &&
+         (((OddSrc % NumElts) + HalfNumElts) <= NumElts);
 }
 
 /// Match shuffles that concatenate two vectors, rotate the concatenation,

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
index e25ffcc0922e..ab8f45f29e72 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
@@ -172,6 +172,58 @@ define <8 x i32> @interleave_v4i32(<4 x i32> %x, <4 x i32> %y) {
   ret <8 x i32> %a
 }
 
+; %y should be slid down by 2
+define <4 x i32> @interleave_v4i32_offset_2(<4 x i32> %x, <4 x i32> %y) {
+; V128-LABEL: interleave_v4i32_offset_2:
+; V128:       # %bb.0:
+; V128-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
+; V128-NEXT:    vslidedown.vi v10, v9, 2
+; V128-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; V128-NEXT:    vwaddu.vv v9, v8, v10
+; V128-NEXT:    li a0, -1
+; V128-NEXT:    vwmaccu.vx v9, a0, v10
+; V128-NEXT:    vmv1r.v v8, v9
+; V128-NEXT:    ret
+;
+; V512-LABEL: interleave_v4i32_offset_2:
+; V512:       # %bb.0:
+; V512-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; V512-NEXT:    vslidedown.vi v10, v9, 2
+; V512-NEXT:    vwaddu.vv v9, v8, v10
+; V512-NEXT:    li a0, -1
+; V512-NEXT:    vwmaccu.vx v9, a0, v10
+; V512-NEXT:    vmv1r.v v8, v9
+; V512-NEXT:    ret
+  %a = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 0, i32 6, i32 1, i32 7>
+  ret <4 x i32> %a
+}
+
+; %y should be slid down by 1
+define <4 x i32> @interleave_v4i32_offset_1(<4 x i32> %x, <4 x i32> %y) {
+; V128-LABEL: interleave_v4i32_offset_1:
+; V128:       # %bb.0:
+; V128-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
+; V128-NEXT:    vslidedown.vi v10, v9, 1
+; V128-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; V128-NEXT:    vwaddu.vv v9, v8, v10
+; V128-NEXT:    li a0, -1
+; V128-NEXT:    vwmaccu.vx v9, a0, v10
+; V128-NEXT:    vmv1r.v v8, v9
+; V128-NEXT:    ret
+;
+; V512-LABEL: interleave_v4i32_offset_1:
+; V512:       # %bb.0:
+; V512-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; V512-NEXT:    vslidedown.vi v10, v9, 1
+; V512-NEXT:    vwaddu.vv v9, v8, v10
+; V512-NEXT:    li a0, -1
+; V512-NEXT:    vwmaccu.vx v9, a0, v10
+; V512-NEXT:    vmv1r.v v8, v9
+; V512-NEXT:    ret
+  %a = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 0, i32 5, i32 1, i32 6>
+  ret <4 x i32> %a
+}
+
 define <16 x i8> @interleave_v8i8(<8 x i8> %x, <8 x i8> %y) {
 ; V128-LABEL: interleave_v8i8:
 ; V128:       # %bb.0:
@@ -362,8 +414,8 @@ define <64 x i32> @interleave_v32i32(<32 x i32> %x, <32 x i32> %y) {
 ; RV32-V128-NEXT:    slli a0, a0, 4
 ; RV32-V128-NEXT:    sub sp, sp, a0
 ; RV32-V128-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; RV32-V128-NEXT:    lui a0, %hi(.LCPI15_0)
-; RV32-V128-NEXT:    addi a0, a0, %lo(.LCPI15_0)
+; RV32-V128-NEXT:    lui a0, %hi(.LCPI17_0)
+; RV32-V128-NEXT:    addi a0, a0, %lo(.LCPI17_0)
 ; RV32-V128-NEXT:    li a1, 32
 ; RV32-V128-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-V128-NEXT:    vle32.v v0, (a0)
@@ -371,8 +423,8 @@ define <64 x i32> @interleave_v32i32(<32 x i32> %x, <32 x i32> %y) {
 ; RV32-V128-NEXT:    vrgather.vv v8, v24, v0
 ; RV32-V128-NEXT:    addi a0, sp, 16
 ; RV32-V128-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
-; RV32-V128-NEXT:    lui a0, %hi(.LCPI15_1)
-; RV32-V128-NEXT:    addi a0, a0, %lo(.LCPI15_1)
+; RV32-V128-NEXT:    lui a0, %hi(.LCPI17_1)
+; RV32-V128-NEXT:    addi a0, a0, %lo(.LCPI17_1)
 ; RV32-V128-NEXT:    vle32.v v24, (a0)
 ; RV32-V128-NEXT:    csrr a0, vlenb
 ; RV32-V128-NEXT:    slli a0, a0, 3
@@ -413,8 +465,8 @@ define <64 x i32> @interleave_v32i32(<32 x i32> %x, <32 x i32> %y) {
 ; RV64-V128-NEXT:    slli a0, a0, 4
 ; RV64-V128-NEXT:    sub sp, sp, a0
 ; RV64-V128-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; RV64-V128-NEXT:    lui a0, %hi(.LCPI15_0)
-; RV64-V128-NEXT:    addi a0, a0, %lo(.LCPI15_0)
+; RV64-V128-NEXT:    lui a0, %hi(.LCPI17_0)
+; RV64-V128-NEXT:    addi a0, a0, %lo(.LCPI17_0)
 ; RV64-V128-NEXT:    li a1, 32
 ; RV64-V128-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV64-V128-NEXT:    vle32.v v0, (a0)
@@ -422,8 +474,8 @@ define <64 x i32> @interleave_v32i32(<32 x i32> %x, <32 x i32> %y) {
 ; RV64-V128-NEXT:    vrgather.vv v8, v24, v0
 ; RV64-V128-NEXT:    addi a0, sp, 16
 ; RV64-V128-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
-; RV64-V128-NEXT:    lui a0, %hi(.LCPI15_1)
-; RV64-V128-NEXT:    addi a0, a0, %lo(.LCPI15_1)
+; RV64-V128-NEXT:    lui a0, %hi(.LCPI17_1)
+; RV64-V128-NEXT:    addi a0, a0, %lo(.LCPI17_1)
 ; RV64-V128-NEXT:    vle32.v v24, (a0)
 ; RV64-V128-NEXT:    csrr a0, vlenb
 ; RV64-V128-NEXT:    slli a0, a0, 3
@@ -494,6 +546,31 @@ define <4 x i8> @unary_interleave_v4i8(<4 x i8> %x) {
   ret <4 x i8> %a
 }
 
+; This shouldn't be lowered as an interleave: extracting the odd elements
+; would read out of bounds.
+define <4 x i8> @unary_interleave_v4i8_invalid(<4 x i8> %x) {
+; V128-LABEL: unary_interleave_v4i8_invalid:
+; V128:       # %bb.0:
+; V128-NEXT:    lui a0, %hi(.LCPI19_0)
+; V128-NEXT:    addi a0, a0, %lo(.LCPI19_0)
+; V128-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; V128-NEXT:    vle8.v v10, (a0)
+; V128-NEXT:    vrgather.vv v9, v8, v10
+; V128-NEXT:    vmv1r.v v8, v9
+; V128-NEXT:    ret
+;
+; V512-LABEL: unary_interleave_v4i8_invalid:
+; V512:       # %bb.0:
+; V512-NEXT:    lui a0, %hi(.LCPI19_0)
+; V512-NEXT:    addi a0, a0, %lo(.LCPI19_0)
+; V512-NEXT:    vsetivli zero, 4, e8, mf8, ta, ma
+; V512-NEXT:    vle8.v v10, (a0)
+; V512-NEXT:    vrgather.vv v9, v8, v10
+; V512-NEXT:    vmv1r.v v8, v9
+; V512-NEXT:    ret
+  %a = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 0, i32 3, i32 1, i32 4>
+  ret <4 x i8> %a
+}
+
 define <4 x i16> @unary_interleave_v4i16(<4 x i16> %x) {
 ; V128-LABEL: unary_interleave_v4i16:
 ; V128:       # %bb.0:
@@ -548,8 +625,8 @@ define <4 x i32> @unary_interleave_v4i32(<4 x i32> %x) {
 define <4 x i64> @unary_interleave_v4i64(<4 x i64> %x) {
 ; RV32-V128-LABEL: unary_interleave_v4i64:
 ; RV32-V128:       # %bb.0:
-; RV32-V128-NEXT:    lui a0, %hi(.LCPI19_0)
-; RV32-V128-NEXT:    addi a0, a0, %lo(.LCPI19_0)
+; RV32-V128-NEXT:    lui a0, %hi(.LCPI22_0)
+; RV32-V128-NEXT:    addi a0, a0, %lo(.LCPI22_0)
 ; RV32-V128-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-V128-NEXT:    vle16.v v12, (a0)
 ; RV32-V128-NEXT:    vrgatherei16.vv v10, v8, v12
@@ -558,8 +635,8 @@ define <4 x i64> @unary_interleave_v4i64(<4 x i64> %x) {
 ;
 ; RV64-V128-LABEL: unary_interleave_v4i64:
 ; RV64-V128:       # %bb.0:
-; RV64-V128-NEXT:    lui a0, %hi(.LCPI19_0)
-; RV64-V128-NEXT:    addi a0, a0, %lo(.LCPI19_0)
+; RV64-V128-NEXT:    lui a0, %hi(.LCPI22_0)
+; RV64-V128-NEXT:    addi a0, a0, %lo(.LCPI22_0)
 ; RV64-V128-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-V128-NEXT:    vle64.v v12, (a0)
 ; RV64-V128-NEXT:    vrgather.vv v10, v8, v12
@@ -568,8 +645,8 @@ define <4 x i64> @unary_interleave_v4i64(<4 x i64> %x) {
 ;
 ; RV32-V512-LABEL: unary_interleave_v4i64:
 ; RV32-V512:       # %bb.0:
-; RV32-V512-NEXT:    lui a0, %hi(.LCPI19_0)
-; RV32-V512-NEXT:    addi a0, a0, %lo(.LCPI19_0)
+; RV32-V512-NEXT:    lui a0, %hi(.LCPI22_0)
+; RV32-V512-NEXT:    addi a0, a0, %lo(.LCPI22_0)
 ; RV32-V512-NEXT:    vsetivli zero, 4, e64, m1, ta, ma
 ; RV32-V512-NEXT:    vle16.v v10, (a0)
 ; RV32-V512-NEXT:    vrgatherei16.vv v9, v8, v10
@@ -578,8 +655,8 @@ define <4 x i64> @unary_interleave_v4i64(<4 x i64> %x) {
 ;
 ; RV64-V512-LABEL: unary_interleave_v4i64:
 ; RV64-V512:       # %bb.0:
-; RV64-V512-NEXT:    lui a0, %hi(.LCPI19_0)
-; RV64-V512-NEXT:    addi a0, a0, %lo(.LCPI19_0)
+; RV64-V512-NEXT:    lui a0, %hi(.LCPI22_0)
+; RV64-V512-NEXT:    addi a0, a0, %lo(.LCPI22_0)
 ; RV64-V512-NEXT:    vsetivli zero, 4, e64, m1, ta, ma
 ; RV64-V512-NEXT:    vle64.v v10, (a0)
 ; RV64-V512-NEXT:    vrgather.vv v9, v8, v10


        

