[llvm] [RISCV] Fix illegal build_vector when lowering double id buildvec on RV32 (PR #67017)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 3 06:45:05 PDT 2023
https://github.com/lukel97 updated https://github.com/llvm/llvm-project/pull/67017
>From 4c4f2d0e256a51ecf69d908b8a43976f0eeb2e9e Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Thu, 21 Sep 2023 15:06:30 +0100
Subject: [PATCH] [RISCV] Fix illegal build_vector when lowering double id
buildvec on RV32
When lowering a constant build_vector sequence of doubles on RV32, if the
addend wasn't zero, or the step/denominator wasn't one, it would crash trying
to lower an illegal build_vector of <n x i64> with i32 operands, e.g.:
t15: v2i64 = BUILD_VECTOR Constant:i32<1>, Constant:i32<1>
This patch fixes the crash by lowering the splats with SelectionDAG::getConstant
using the vector type, which legalizes them via ISD::SPLAT_VECTOR_PARTS.
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 10 +-
.../RISCV/rvv/fixed-vectors-fp-buildvec.ll | 129 ++++++++++++++++++
.../RISCV/rvv/fixed-vectors-int-buildvec.ll | 2 +-
3 files changed, 134 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e47168e10d8dc13..249dfa85ff6873e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3386,18 +3386,16 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
VID = convertFromScalableVector(VIDVT, VID, DAG, Subtarget);
if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
(StepOpcode == ISD::SHL && SplatStepVal != 0)) {
- SDValue SplatStep = DAG.getSplatBuildVector(
- VIDVT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
+ SDValue SplatStep = DAG.getConstant(SplatStepVal, DL, VIDVT);
VID = DAG.getNode(StepOpcode, DL, VIDVT, VID, SplatStep);
}
if (StepDenominator != 1) {
- SDValue SplatStep = DAG.getSplatBuildVector(
- VIDVT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
+ SDValue SplatStep =
+ DAG.getConstant(Log2_64(StepDenominator), DL, VIDVT);
VID = DAG.getNode(ISD::SRL, DL, VIDVT, VID, SplatStep);
}
if (Addend != 0 || Negate) {
- SDValue SplatAddend = DAG.getSplatBuildVector(
- VIDVT, DL, DAG.getConstant(Addend, DL, XLenVT));
+ SDValue SplatAddend = DAG.getConstant(Addend, DL, VIDVT);
VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VIDVT, SplatAddend,
VID);
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
index 7593c1ab75ce420..d1ea56a1ff93819 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
@@ -1083,3 +1083,132 @@ define <2 x float> @signbits() {
entry:
ret <2 x float> <float 0x36A0000000000000, float 0.000000e+00>
}
+
+define <2 x half> @vid_v2f16() {
+; CHECK-LABEL: vid_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ ret <2 x half> <half 0.0, half 1.0>
+}
+
+define <2 x half> @vid_addend1_v2f16() {
+; CHECK-LABEL: vid_addend1_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: vadd.vi v8, v8, 1
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ ret <2 x half> <half 1.0, half 2.0>
+}
+
+define <2 x half> @vid_denominator2_v2f16() {
+; CHECK-LABEL: vid_denominator2_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI27_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI27_0)
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: ret
+ ret <2 x half> <half 0.5, half 1.0>
+}
+
+define <2 x half> @vid_step2_v2f16() {
+; CHECK-LABEL: vid_step2_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ ret <2 x half> <half 0.0, half 2.0>
+}
+
+define <2 x float> @vid_v2f32() {
+; CHECK-LABEL: vid_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ ret <2 x float> <float 0.0, float 1.0>
+}
+
+define <2 x float> @vid_addend1_v2f32() {
+; CHECK-LABEL: vid_addend1_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: vadd.vi v8, v8, 1
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ ret <2 x float> <float 1.0, float 2.0>
+}
+
+define <2 x float> @vid_denominator2_v2f32() {
+; CHECK-LABEL: vid_denominator2_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI31_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI31_0)
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: ret
+ ret <2 x float> <float 0.5, float 1.0>
+}
+
+define <2 x float> @vid_step2_v2f32() {
+; CHECK-LABEL: vid_step2_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ ret <2 x float> <float 0.0, float 2.0>
+}
+
+define <2 x double> @vid_v2f64() {
+; CHECK-LABEL: vid_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ ret <2 x double> <double 0.0, double 1.0>
+}
+
+define <2 x double> @vid_addend1_v2f64() {
+; CHECK-LABEL: vid_addend1_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: vadd.vi v8, v8, 1
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ ret <2 x double> <double 1.0, double 2.0>
+}
+
+define <2 x double> @vid_denominator2_v2f64() {
+; CHECK-LABEL: vid_denominator2_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI35_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI35_0)
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: ret
+ ret <2 x double> <double 0.5, double 1.0>
+}
+
+define <2 x double> @vid_step2_v2f64() {
+; CHECK-LABEL: vid_step2_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ ret <2 x double> <double 0.0, double 2.0>
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index e95978744c408e9..b648420aa2e03cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -259,7 +259,7 @@ define <4 x i8> @buildvec_vid_stepn3_add3_v4i8() {
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.i v9, 3
; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: li a0, -3
+; CHECK-NEXT: li a0, 253
; CHECK-NEXT: vmadd.vx v8, a0, v9
; CHECK-NEXT: ret
ret <4 x i8> <i8 3, i8 0, i8 -3, i8 -6>
More information about the llvm-commits
mailing list