[llvm] 1c676e0 - [RISCV] Do a more complete job of disabling extending loads and truncating stores for fixed vector types.

Craig Topper via llvm-commits llvm-commits@lists.llvm.org
Thu Jun 29 00:24:22 PDT 2023


Author: Craig Topper
Date: 2023-06-29T00:23:16-07:00
New Revision: 1c676e08d0ac4626b6347d01c4e110a85c97f9d2

URL: https://github.com/llvm/llvm-project/commit/1c676e08d0ac4626b6347d01c4e110a85c97f9d2
DIFF: https://github.com/llvm/llvm-project/commit/1c676e08d0ac4626b6347d01c4e110a85c97f9d2.diff

LOG: [RISCV] Do a more complete job of disabling extending loads and truncating stores for fixed vector types.

We weren't marking some combinations as Expand if one of the
types wasn't legal.

Fixes #63596.
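
For reference, the truncating-store side of the same loops can be exercised
with an IR pattern along the lines of the sketch below. This is illustrative
only and not part of the commit; the function name is made up:

  ; Hypothetical companion to pr63596.ll: a narrowing FP vector store.
  define void @store_trunc(ptr %p, <4 x float> %v) nounwind {
    %t = fptrunc <4 x float> %v to <4 x half>
    store <4 x half> %t, ptr %p, align 2
    ret void
  }

As with the extending-load case covered by pr63596.ll, the point is that the
Expand action is now recorded even when one of the two fixed vector types is
not legal for the configured subtarget.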

Added: 
    llvm/test/CodeGen/RISCV/rvv/pr63596.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e394d81cd5dbe..539aa2e5cce7e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1020,16 +1020,18 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       }
 
       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
+        // There are no extending loads or truncating stores.
+        for (MVT InnerVT : MVT::fp_fixedlen_vector_valuetypes()) {
+          setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
+          setTruncStoreAction(VT, InnerVT, Expand);
+        }
+
         if (!useRVVForFixedLengthVectorVT(VT))
           continue;
 
         // By default everything must be expanded.
         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
           setOperationAction(Op, VT, Expand);
-        for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
-          setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
-          setTruncStoreAction(VT, OtherVT, Expand);
-        }
 
         // Custom lower fixed vector undefs to scalable vector undefs to avoid
         // expansion to a build_vector of 0s.

diff --git a/llvm/test/CodeGen/RISCV/rvv/pr63596.ll b/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
new file mode 100644
index 0000000000000..3f5de02c1e429
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
@@ -0,0 +1,53 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v -target-abi=lp64d | FileCheck %s
+
+define <4 x float> @foo(ptr %0) nounwind {
+; CHECK-LABEL: foo:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -48
+; CHECK-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    lhu s0, 6(a0)
+; CHECK-NEXT:    lhu s1, 4(a0)
+; CHECK-NEXT:    lhu s2, 0(a0)
+; CHECK-NEXT:    lhu a0, 2(a0)
+; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    fsw fa0, 8(sp)
+; CHECK-NEXT:    mv a0, s2
+; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    fsw fa0, 0(sp)
+; CHECK-NEXT:    mv a0, s1
+; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    fsw fa0, 12(sp)
+; CHECK-NEXT:    mv a0, s0
+; CHECK-NEXT:    call __extendhfsf2@plt
+; CHECK-NEXT:    fsw fa0, 4(sp)
+; CHECK-NEXT:    addi a0, sp, 8
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT:    vle32.v v9, (a0)
+; CHECK-NEXT:    mv a0, sp
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
+; CHECK-NEXT:    addi a0, sp, 12
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT:    vle32.v v9, (a0)
+; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; CHECK-NEXT:    vslideup.vi v8, v9, 2
+; CHECK-NEXT:    addi a0, sp, 4
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT:    vle32.v v9, (a0)
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vslideup.vi v8, v9, 3
+; CHECK-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 48
+; CHECK-NEXT:    ret
+  %2 = load <4 x half>, ptr %0, align 2
+  %3 = fpext <4 x half> %2 to <4 x float>
+  ret <4 x float> %3
+}


        

