[llvm] 2dd20a3 - [ValueTypes] Fix scalable-vector changeExtendedVectorElementTypeToInteger
Fraser Cormack via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 3 01:45:04 PDT 2021
Author: Fraser Cormack
Date: 2021-06-03T09:36:56+01:00
New Revision: 2dd20a31f27e3123f5f324c3a80092ad3f80ef05
URL: https://github.com/llvm/llvm-project/commit/2dd20a31f27e3123f5f324c3a80092ad3f80ef05
DIFF: https://github.com/llvm/llvm-project/commit/2dd20a31f27e3123f5f324c3a80092ad3f80ef05.diff
LOG: [ValueTypes] Fix scalable-vector changeExtendedVectorElementTypeToInteger
This method previously rebuilt the vector type from getVectorNumElements()
plus a separate scalable flag; it now uses getVectorElementCount(), since
querying a fixed element count is not meaningful for scalable vectors.
The attached tests check for the regression in DAGCombiner's
`visitVSELECT`, which may call this method.
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D103534
Added:
Modified:
llvm/lib/CodeGen/ValueTypes.cpp
llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/ValueTypes.cpp b/llvm/lib/CodeGen/ValueTypes.cpp
index 4d496e54bc946..1672e5f77c768 100644
--- a/llvm/lib/CodeGen/ValueTypes.cpp
+++ b/llvm/lib/CodeGen/ValueTypes.cpp
@@ -24,8 +24,7 @@ EVT EVT::changeExtendedVectorElementTypeToInteger() const {
assert(isExtended() && "Type is not extended!");
LLVMContext &Context = LLVMTy->getContext();
EVT IntTy = getIntegerVT(Context, getScalarSizeInBits());
- return getVectorVT(Context, IntTy, getVectorNumElements(),
- isScalableVector());
+ return getVectorVT(Context, IntTy, getVectorElementCount());
}
EVT EVT::changeExtendedVectorElementType(EVT EltVT) const {
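For context, the patched function sits behind EVT::changeVectorElementTypeToInteger(),
which dispatches to the extended-type path for vector types that have no simple MVT
equivalent. A minimal standalone sketch of that path (illustrative only, not part of
the commit; it assumes an LLVM tree of roughly this vintage, and the main() harness
is purely for demonstration):

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/TypeSize.h"
#include <cassert>

using namespace llvm;

int main() {
  LLVMContext Ctx;
  // <vscale x 16 x double> exceeds the largest simple scalable MVT, so
  // this EVT is "extended" (backed by an llvm::Type rather than an MVT).
  // That is the form handled by changeExtendedVectorElementTypeToInteger().
  EVT VT = EVT::getVectorVT(Ctx, MVT::f64, ElementCount::getScalable(16));
  assert(!VT.isSimple() && VT.isScalableVector());
  // DAGCombiner's visitVSELECT reaches the patched code through this
  // wrapper. With the fix, the scalable element count is carried through
  // as a single ElementCount value rather than a count plus a flag.
  EVT IntVT = VT.changeVectorElementTypeToInteger();
  assert(IntVT.isScalableVector() &&
         IntVT.getVectorElementCount() == VT.getVectorElementCount() &&
         IntVT.getScalarSizeInBits() == 64);
  return 0;
}

The regression tests below trigger the same path from IR: <vscale x 16 x i64>
and <vscale x 16 x double> are wider than the largest simple scalable MVTs,
forcing the extended-EVT code above.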
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
index b2a7b2cff2cdf..ead95d14eb0e1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \
+; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \
; RUN: -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x half> @vfmerge_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %cond) {
@@ -386,3 +386,37 @@ define <vscale x 8 x double> @vfmerge_zv_nxv8f64(<vscale x 8 x double> %va, <vsc
%vc = select <vscale x 8 x i1> %cond, <vscale x 8 x double> %splat, <vscale x 8 x double> %va
ret <vscale x 8 x double> %vc
}
+
+define <vscale x 16 x double> @vselect_combine_regression(<vscale x 16 x i64> %va, <vscale x 16 x double> %vb) {
+; CHECK-LABEL: vselect_combine_regression:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, a0, a1
+; CHECK-NEXT: vl8re64.v v24, (a1)
+; CHECK-NEXT: vl8re64.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v1, v16, 0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %cond = icmp eq <vscale x 16 x i64> %va, zeroinitializer
+ %sel = select <vscale x 16 x i1> %cond, <vscale x 16 x double> %vb, <vscale x 16 x double> zeroinitializer
+ ret <vscale x 16 x double> %sel
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
index 6200c75a14599..fb61097129e8e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \
+; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+experimental-zfh,+experimental-v -target-abi=lp64d \
; RUN: -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x half> @vfmerge_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %cond) {
@@ -387,3 +387,36 @@ define <vscale x 8 x double> @vfmerge_zv_nxv8f64(<vscale x 8 x double> %va, <vsc
%vc = select <vscale x 8 x i1> %cond, <vscale x 8 x double> %splat, <vscale x 8 x double> %va
ret <vscale x 8 x double> %vc
}
+define <vscale x 16 x double> @vselect_combine_regression(<vscale x 16 x i64> %va, <vscale x 16 x double> %vb) {
+; CHECK-LABEL: vselect_combine_regression:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, a0, a1
+; CHECK-NEXT: vl8re64.v v24, (a1)
+; CHECK-NEXT: vl8re64.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v1, v16, 0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %cond = icmp eq <vscale x 16 x i64> %va, zeroinitializer
+ %sel = select <vscale x 16 x i1> %cond, <vscale x 16 x double> %vb, <vscale x 16 x double> zeroinitializer
+ ret <vscale x 16 x double> %sel
+}