[llvm] 00c4e0a - [RISCV] Guard the ISD::EXTRACT_VECTOR_ELT handling in ReplaceNodeResults against fixed vectors and non-MVT types.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 17 18:26:30 PST 2021


Author: Craig Topper
Date: 2021-02-17T18:25:38-08:00
New Revision: 00c4e0a8f60b73a92a319963e84bfc9fdeee5b19

URL: https://github.com/llvm/llvm-project/commit/00c4e0a8f60b73a92a319963e84bfc9fdeee5b19
DIFF: https://github.com/llvm/llvm-project/commit/00c4e0a8f60b73a92a319963e84bfc9fdeee5b19.diff

LOG: [RISCV] Guard the ISD::EXTRACT_VECTOR_ELT handling in ReplaceNodeResults against fixed vectors and non-MVT types.

The type legalizer calls this code based on the scalar result type, so
we need to verify that the input type is a scalable vector.

The vector type has also not been legalized yet when this is called
so we need to use EVT for it.

Added: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 44f70582c717..d4b1b3efe825 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2555,18 +2555,22 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
     SDLoc DL(N);
     SDValue Vec = N->getOperand(0);
     SDValue Idx = N->getOperand(1);
-    MVT VecVT = Vec.getSimpleValueType();
+    EVT VecVT = Vec.getValueType();
     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
            VecVT.getVectorElementType() == MVT::i64 &&
            "Unexpected EXTRACT_VECTOR_ELT legalization");
 
+    if (!VecVT.isScalableVector())
+      return;
+
     SDValue Slidedown = Vec;
     MVT XLenVT = Subtarget.getXLenVT();
     // Unless the index is known to be 0, we must slide the vector down to get
     // the desired element into index 0.
     if (!isNullConstant(Idx)) {
       SDValue Mask, VL;
-      std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
+      std::tie(Mask, VL) =
+          getDefaultScalableVLOps(VecVT.getSimpleVT(), DL, DAG, Subtarget);
       Slidedown = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VecVT,
                               DAG.getUNDEF(VecVT), Vec, Idx, Mask, VL);
     }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
new file mode 100644
index 000000000000..590494c42cea
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -0,0 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64
+
+; FIXME: This codegen needs to be improved. These tests previously asserted in
+; ReplaceNodeResults on RV32.
+
+define i64 @extractelt_v4i64(<4 x i64>* %x) nounwind {
+; RV32-LABEL: extractelt_v4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -64
+; RV32-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi s0, sp, 64
+; RV32-NEXT:    andi sp, sp, -32
+; RV32-NEXT:    addi a1, zero, 8
+; RV32-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; RV32-NEXT:    vle32.v v26, (a0)
+; RV32-NEXT:    vse32.v v26, (sp)
+; RV32-NEXT:    lw a0, 24(sp)
+; RV32-NEXT:    lw a1, 28(sp)
+; RV32-NEXT:    addi sp, s0, -64
+; RV32-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 64
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -64
+; RV64-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi s0, sp, 64
+; RV64-NEXT:    andi sp, sp, -32
+; RV64-NEXT:    addi a1, zero, 4
+; RV64-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; RV64-NEXT:    vle64.v v26, (a0)
+; RV64-NEXT:    vse64.v v26, (sp)
+; RV64-NEXT:    ld a0, 24(sp)
+; RV64-NEXT:    addi sp, s0, -64
+; RV64-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    ret
+  %a = load <4 x i64>, <4 x i64>* %x
+  %b = extractelement <4 x i64> %a, i32 3
+  ret i64 %b
+}
+
+; This uses a non-power of 2 type so that it isn't an MVT to catch an
+; incorrect use of getSimpleValueType().
+define i64 @extractelt_v3i64(<3 x i64>* %x) nounwind {
+; RV32-LABEL: extractelt_v3i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -64
+; RV32-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi s0, sp, 64
+; RV32-NEXT:    andi sp, sp, -32
+; RV32-NEXT:    addi a1, zero, 8
+; RV32-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; RV32-NEXT:    vle32.v v26, (a0)
+; RV32-NEXT:    vse32.v v26, (sp)
+; RV32-NEXT:    lw a0, 16(sp)
+; RV32-NEXT:    lw a1, 20(sp)
+; RV32-NEXT:    addi sp, s0, -64
+; RV32-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 64
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v3i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -64
+; RV64-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi s0, sp, 64
+; RV64-NEXT:    andi sp, sp, -32
+; RV64-NEXT:    addi a1, zero, 4
+; RV64-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; RV64-NEXT:    vle64.v v26, (a0)
+; RV64-NEXT:    vse64.v v26, (sp)
+; RV64-NEXT:    ld a0, 16(sp)
+; RV64-NEXT:    addi sp, s0, -64
+; RV64-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    ret
+  %a = load <3 x i64>, <3 x i64>* %x
+  %b = extractelement <3 x i64> %a, i32 2
+  ret i64 %b
+}


        


More information about the llvm-commits mailing list