[llvm] [RISCV] custom scmp(x, 0) and scmp(0, x) lowering for RVV (PR #151753)
Olaf Bernstein via llvm-commits
llvm-commits at lists.llvm.org
Mon Aug 4 09:28:51 PDT 2025
https://github.com/camel-cdr updated https://github.com/llvm/llvm-project/pull/151753
>From de5c1c9bcf1ef2074ba8fafe4c134323b6ce352c Mon Sep 17 00:00:00 2001
From: Olaf Bernstein <camel-cdr at protonmail.com>
Date: Fri, 1 Aug 2025 20:44:17 +0200
Subject: [PATCH 1/2] [RISCV] add scmp RVV CodeGen tests
---
.../CodeGen/RISCV/rvv/fixed-vectors-scmp.ll | 189 ++++++++++++++++
llvm/test/CodeGen/RISCV/rvv/scmp.ll | 206 ++++++++++++++++++
2 files changed, 395 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-scmp.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/scmp.ll
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-scmp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-scmp.ll
new file mode 100644
index 0000000000000..e954c74549f80
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-scmp.ll
@@ -0,0 +1,189 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <16 x i8> @scmp_i8i8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: scmp_i8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <16 x i8> @llvm.scmp(<16 x i8> %a, <16 x i8> %b)
+ ret <16 x i8> %c
+}
+
+define <16 x i8> @scmp_z8i8(<16 x i8> %a) {
+; CHECK-LABEL: scmp_z8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmsle.vi v0, v8, -1
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT: vmsgt.vi v0, v8, 0
+; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <16 x i8> @llvm.scmp(<16 x i8> zeroinitializer, <16 x i8> %a)
+ ret <16 x i8> %c
+}
+
+define <16 x i8> @scmp_i8z8(<16 x i8> %a) {
+; CHECK-LABEL: scmp_i8z8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmsgt.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT: vmsle.vi v0, v8, -1
+; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <16 x i8> @llvm.scmp(<16 x i8> %a, <16 x i8> zeroinitializer)
+ ret <16 x i8> %c
+}
+
+
+define <8 x i16> @scmp_i16i16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: scmp_i16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <8 x i16> @llvm.scmp(<8 x i16> %a, <8 x i16> %b)
+ ret <8 x i16> %c
+}
+
+define <8 x i16> @scmp_z16i16(<8 x i16> %a) {
+; CHECK-LABEL: scmp_z16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmsle.vi v0, v8, -1
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT: vmsgt.vi v0, v8, 0
+; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <8 x i16> @llvm.scmp(<8 x i16> zeroinitializer, <8 x i16> %a)
+ ret <8 x i16> %c
+}
+
+define <8 x i16> @scmp_i16z16(<8 x i16> %a) {
+; CHECK-LABEL: scmp_i16z16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmsgt.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT: vmsle.vi v0, v8, -1
+; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <8 x i16> @llvm.scmp(<8 x i16> %a, <8 x i16> zeroinitializer)
+ ret <8 x i16> %c
+}
+
+
+define <4 x i32> @scmp_i32i32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: scmp_i32i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <4 x i32> @llvm.scmp(<4 x i32> %a, <4 x i32> %b)
+ ret <4 x i32> %c
+}
+
+define <4 x i32> @scmp_z32i32(<4 x i32> %a) {
+; CHECK-LABEL: scmp_z32i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmsle.vi v0, v8, -1
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT: vmsgt.vi v0, v8, 0
+; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <4 x i32> @llvm.scmp(<4 x i32> zeroinitializer, <4 x i32> %a)
+ ret <4 x i32> %c
+}
+
+define <4 x i32> @scmp_i32z32(<4 x i32> %a) {
+; CHECK-LABEL: scmp_i32z32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmsgt.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT: vmsle.vi v0, v8, -1
+; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <4 x i32> @llvm.scmp(<4 x i32> %a, <4 x i32> zeroinitializer)
+ ret <4 x i32> %c
+}
+
+
+define <2 x i64> @scmp_i64i64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: scmp_i64i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <2 x i64> @llvm.scmp(<2 x i64> %a, <2 x i64> %b)
+ ret <2 x i64> %c
+}
+
+define <2 x i64> @scmp_z64i64(<2 x i64> %a) {
+; CHECK-LABEL: scmp_z64i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmsle.vi v0, v8, -1
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT: vmsgt.vi v0, v8, 0
+; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <2 x i64> @llvm.scmp(<2 x i64> zeroinitializer, <2 x i64> %a)
+ ret <2 x i64> %c
+}
+
+define <2 x i64> @scmp_i64z64(<2 x i64> %a) {
+; CHECK-LABEL: scmp_i64z64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmsgt.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT: vmsle.vi v0, v8, -1
+; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <2 x i64> @llvm.scmp(<2 x i64> %a, <2 x i64> zeroinitializer)
+ ret <2 x i64> %c
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/scmp.ll b/llvm/test/CodeGen/RISCV/rvv/scmp.ll
new file mode 100644
index 0000000000000..aaac9c03bb2c0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/scmp.ll
@@ -0,0 +1,206 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 16 x i8> @scmp_i8i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: scmp_i8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmslt.vv v0, v10, v8
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT: vmslt.vv v0, v8, v10
+; CHECK-NEXT: vmerge.vim v8, v12, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <vscale x 16 x i8> @llvm.scmp(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+ ret <vscale x 16 x i8> %c
+}
+
+define <vscale x 16 x i8> @scmp_z8i8(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: scmp_z8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmsgt.vi v0, v8, 0
+; CHECK-NEXT: vsrl.vi v8, v8, 7
+; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <vscale x 16 x i8> @llvm.scmp(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> %a)
+ ret <vscale x 16 x i8> %c
+}
+
+define <vscale x 16 x i8> @scmp_i8z8(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: scmp_i8z8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmsgt.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT: vmsle.vi v0, v8, -1
+; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <vscale x 16 x i8> @llvm.scmp(<vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer)
+ ret <vscale x 16 x i8> %c
+}
+
+
+define <vscale x 8 x i16> @scmp_i16i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: scmp_i16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmslt.vv v0, v10, v8
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT: vmslt.vv v0, v8, v10
+; CHECK-NEXT: vmerge.vim v8, v12, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <vscale x 8 x i16> @llvm.scmp(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+ ret <vscale x 8 x i16> %c
+}
+
+define <vscale x 8 x i16> @scmp_z16i16(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: scmp_z16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmsgt.vi v0, v8, 0
+; CHECK-NEXT: vsrl.vi v8, v8, 15
+; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <vscale x 8 x i16> @llvm.scmp(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> %a)
+ ret <vscale x 8 x i16> %c
+}
+
+define <vscale x 8 x i16> @scmp_i16z16(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: scmp_i16z16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmsgt.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT: vmsle.vi v0, v8, -1
+; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <vscale x 8 x i16> @llvm.scmp(<vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer)
+ ret <vscale x 8 x i16> %c
+}
+
+
+define <vscale x 4 x i32> @scmp_i32i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: scmp_i32i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vmslt.vv v0, v10, v8
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT: vmslt.vv v0, v8, v10
+; CHECK-NEXT: vmerge.vim v8, v12, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <vscale x 4 x i32> @llvm.scmp(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+ ret <vscale x 4 x i32> %c
+}
+
+define <vscale x 4 x i32> @scmp_z32i32(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: scmp_z32i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vmsgt.vi v0, v8, 0
+; CHECK-NEXT: vsrl.vi v8, v8, 31
+; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <vscale x 4 x i32> @llvm.scmp(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> %a)
+ ret <vscale x 4 x i32> %c
+}
+
+define <vscale x 4 x i32> @scmp_i32z32(<vscale x 4 x i32> %a) {
+; RV32-LABEL: scmp_i32z32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu
+; RV32-NEXT: vmsgt.vi v0, v8, 0
+; RV32-NEXT: vsra.vi v8, v8, 31
+; RV32-NEXT: vor.vi v8, v8, 1, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: scmp_i32z32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vmsgt.vi v0, v8, 0
+; RV64-NEXT: vmv.v.i v10, 0
+; RV64-NEXT: vmerge.vim v10, v10, 1, v0
+; RV64-NEXT: vmsle.vi v0, v8, -1
+; RV64-NEXT: vmerge.vim v8, v10, -1, v0
+; RV64-NEXT: ret
+entry:
+ %c = call <vscale x 4 x i32> @llvm.scmp(<vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 4 x i32> %c
+}
+
+
+define <vscale x 2 x i64> @scmp_i64i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: scmp_i64i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: vmslt.vv v0, v10, v8
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT: vmslt.vv v0, v8, v10
+; CHECK-NEXT: vmerge.vim v8, v12, -1, v0
+; CHECK-NEXT: ret
+entry:
+ %c = call <vscale x 2 x i64> @llvm.scmp(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+ ret <vscale x 2 x i64> %c
+}
+
+define <vscale x 2 x i64> @scmp_z64i64(<vscale x 2 x i64> %a) {
+; RV32-LABEL: scmp_z64i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32-NEXT: vmsle.vi v0, v8, -1
+; RV32-NEXT: vmv.v.i v10, 0
+; RV32-NEXT: vmerge.vim v10, v10, 1, v0
+; RV32-NEXT: vmsgt.vi v0, v8, 0
+; RV32-NEXT: vmerge.vim v8, v10, -1, v0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: scmp_z64i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a0, 63
+; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; RV64-NEXT: vmsgt.vi v0, v8, 0
+; RV64-NEXT: vsrl.vx v8, v8, a0
+; RV64-NEXT: vmerge.vim v8, v8, -1, v0
+; RV64-NEXT: ret
+entry:
+ %c = call <vscale x 2 x i64> @llvm.scmp(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> %a)
+ ret <vscale x 2 x i64> %c
+}
+
+define <vscale x 2 x i64> @scmp_i64z64(<vscale x 2 x i64> %a) {
+; RV32-LABEL: scmp_i64z64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32-NEXT: vmsgt.vi v0, v8, 0
+; RV32-NEXT: vmv.v.i v10, 0
+; RV32-NEXT: vmerge.vim v10, v10, 1, v0
+; RV32-NEXT: vmsle.vi v0, v8, -1
+; RV32-NEXT: vmerge.vim v8, v10, -1, v0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: scmp_i64z64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a0, 63
+; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; RV64-NEXT: vmsgt.vi v0, v8, 0
+; RV64-NEXT: vsra.vx v8, v8, a0
+; RV64-NEXT: vor.vi v8, v8, 1, v0.t
+; RV64-NEXT: ret
+entry:
+ %c = call <vscale x 2 x i64> @llvm.scmp(<vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer)
+ ret <vscale x 2 x i64> %c
+}
>From 65026b3bb444c83ff0a19bc5238fece8af3e6a4a Mon Sep 17 00:00:00 2001
From: Olaf Bernstein <camel-cdr at protonmail.com>
Date: Fri, 1 Aug 2025 22:38:50 +0200
Subject: [PATCH 2/2] [RISCV] custom scmp(x,0) and scmp(0,x) lowering for RVV
The current codegen for scmp(x,0) and scmp(0,x), also known as sign(x)
and -sign(x), isn't optimal for RVV.
It produces a four-instruction sequence of
vmsgt.vi + vmslt.vi + vmerge.vim + vmerge.vim
for SEW<=32 and three instructions for SEW=64.
scmp(0,x): vmsgt.vi + vsrl.vx + vmerge.vim
scmp(x,0): vmsgt.vi + vsra.vx + vor.vi
This patch introduces a new lowering that expresses the above approach
in SelectionDAG nodes for all values of SEW.
This maps to two arithmetic instructions and a vector register move:
scmp(0,x): vmv.v.i/v + vmsgt.vi + masked vsrl.vi/vx
scmp(x,0): vmv.v.i/v + vmsgt.vi + masked vsra.vi/vx
These clobber v0, need a destination different from the input, and
need an additional GPR for SEW=64.
For the SEW<=32 scmp(x,0) case, a slightly different lowering
was chosen:
scmp(x,0): vmin.vx + vsra.vi + vor.vv
This doesn't clobber v0, but uses a single GPR.
We deemed using a single GPR slightly better than clobbering v0
(SEW<=32), but using two GPRs worse than using one GPR and
clobbering v0.
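For reference, a minimal scalar sketch of the two identities the new
lowering relies on (plain C++ with SEW fixed to 32; the helper names are
illustrative only and assume arithmetic right shift of signed values, as
on RISC-V):

#include <algorithm>
#include <cassert>
#include <cstdint>

// scmp(x, 0) == sign(x): the arithmetic shift yields -1 for negative x
// and 0 otherwise; min(x, 1) yields 1, 0, or a negative value; OR-ing
// the two produces 1, 0, or -1.
int32_t scmp_x_zero(int32_t x) {
  return (x >> 31) | std::min(x, int32_t(1));
}

// scmp(0, x) == -sign(x): for x > 0 the result is -1; otherwise the
// logical shift moves the sign bit to the LSB, giving 1 for negative x
// and 0 for zero.
int32_t scmp_zero_x(int32_t x) {
  return x > 0 ? -1 : int32_t(uint32_t(x) >> 31);
}

int main() {
  for (int64_t i = -3; i <= 3; ++i) {
    int32_t x = int32_t(i);
    int32_t sign = (x > 0) - (x < 0);
    assert(scmp_x_zero(x) == sign);
    assert(scmp_zero_x(x) == -sign);
  }
  return 0;
}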
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 32 +++++
.../CodeGen/RISCV/rvv/fixed-vectors-scmp.ll | 119 ++++++++++--------
llvm/test/CodeGen/RISCV/rvv/scmp.ll | 90 +++++++------
3 files changed, 139 insertions(+), 102 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index adbfbeb4669e7..2b4764f078141 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -880,6 +880,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
Legal);
+ setOperationAction(ISD::SCMP, VT, Custom);
setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);
// Custom-lower extensions and truncations from/to mask types.
@@ -1361,6 +1362,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(
{ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, ISD::ABS}, VT, Custom);
+ setOperationAction(ISD::SCMP, VT, Custom);
setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);
// vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
@@ -8223,6 +8225,36 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
case ISD::SADDSAT:
case ISD::SSUBSAT:
return lowerToScalableOp(Op, DAG);
+ case ISD::SCMP: {
+ SDLoc DL(Op);
+ EVT VT = Op->getValueType(0);
+ SDValue LHS = Op->getOperand(0);
+ SDValue RHS = Op->getOperand(1);
+ unsigned SEW = VT.getScalarSizeInBits();
+ EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
+
+ SDValue Shift = DAG.getConstant(SEW - 1, DL, VT);
+ SDValue Zero = DAG.getConstant(0, DL, VT);
+ SDValue One = DAG.getConstant(1, DL, VT);
+ SDValue MinusOne = DAG.getAllOnesConstant(DL, VT);
+
+ if (ISD::isConstantSplatVectorAllZeros(RHS.getNode())) {
+ // scmp(lhs, 0) -> vor.vv(vsra.vi/vx(lhs,SEW-1), vmin.vx(lhs,1))
+ LHS = DAG.getFreeze(LHS);
+ SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, LHS, Shift);
+ SDValue Min = DAG.getNode(ISD::SMIN, DL, VT, LHS, One);
+ return DAG.getNode(ISD::OR, DL, VT, Sra, Min);
+ }
+ if (ISD::isConstantSplatVectorAllZeros(LHS.getNode())) {
+ // scmp(0, rhs) -> vmerge.vi(vmsgt.vi(rhs,0), vsrl.vi/vx(rhs,SEW-1), -1)
+ RHS = DAG.getFreeze(RHS);
+ SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, RHS, Shift);
+ SDValue Setcc = DAG.getSetCC(DL, CCVT, RHS, Zero, ISD::SETLE);
+ return DAG.getSelect(DL, VT, Setcc, Srl, MinusOne);
+ }
+
+ return SDValue();
+ }
case ISD::ABDS:
case ISD::ABDU: {
SDLoc dl(Op);
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-scmp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-scmp.ll
index e954c74549f80..1950e790c455b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-scmp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-scmp.ll
@@ -20,12 +20,11 @@ entry:
define <16 x i8> @scmp_z8i8(<16 x i8> %a) {
; CHECK-LABEL: scmp_z8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, -1
-; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vmsgt.vi v0, v8, 0
-; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT: vmsle.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v9, -1
+; CHECK-NEXT: vsrl.vi v9, v8, 7, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%c = call <16 x i8> @llvm.scmp(<16 x i8> zeroinitializer, <16 x i8> %a)
@@ -35,12 +34,11 @@ entry:
define <16 x i8> @scmp_i8z8(<16 x i8> %a) {
; CHECK-LABEL: scmp_i8z8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a0, 1
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 0
-; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vmsle.vi v0, v8, -1
-; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+; CHECK-NEXT: vmin.vx v9, v8, a0
+; CHECK-NEXT: vsra.vi v8, v8, 7
+; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
%c = call <16 x i8> @llvm.scmp(<16 x i8> %a, <16 x i8> zeroinitializer)
@@ -66,12 +64,11 @@ entry:
define <8 x i16> @scmp_z16i16(<8 x i16> %a) {
; CHECK-LABEL: scmp_z16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, -1
-; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vmsgt.vi v0, v8, 0
-; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vmsle.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v9, -1
+; CHECK-NEXT: vsrl.vi v9, v8, 15, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%c = call <8 x i16> @llvm.scmp(<8 x i16> zeroinitializer, <8 x i16> %a)
@@ -81,12 +78,11 @@ entry:
define <8 x i16> @scmp_i16z16(<8 x i16> %a) {
; CHECK-LABEL: scmp_i16z16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a0, 1
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 0
-; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vmsle.vi v0, v8, -1
-; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+; CHECK-NEXT: vmin.vx v9, v8, a0
+; CHECK-NEXT: vsra.vi v8, v8, 15
+; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
%c = call <8 x i16> @llvm.scmp(<8 x i16> %a, <8 x i16> zeroinitializer)
@@ -112,12 +108,11 @@ entry:
define <4 x i32> @scmp_z32i32(<4 x i32> %a) {
; CHECK-LABEL: scmp_z32i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, -1
-; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vmsgt.vi v0, v8, 0
-; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vmsle.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v9, -1
+; CHECK-NEXT: vsrl.vi v9, v8, 31, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
%c = call <4 x i32> @llvm.scmp(<4 x i32> zeroinitializer, <4 x i32> %a)
@@ -127,12 +122,11 @@ entry:
define <4 x i32> @scmp_i32z32(<4 x i32> %a) {
; CHECK-LABEL: scmp_i32z32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a0, 1
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 0
-; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vmsle.vi v0, v8, -1
-; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
+; CHECK-NEXT: vmin.vx v9, v8, a0
+; CHECK-NEXT: vsra.vi v8, v8, 31
+; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
%c = call <4 x i32> @llvm.scmp(<4 x i32> %a, <4 x i32> zeroinitializer)
@@ -156,34 +150,51 @@ entry:
}
define <2 x i64> @scmp_z64i64(<2 x i64> %a) {
-; CHECK-LABEL: scmp_z64i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmsle.vi v0, v8, -1
-; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vmsgt.vi v0, v8, 0
-; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
-; CHECK-NEXT: ret
+; RV32-LABEL: scmp_z64i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vmsle.vi v0, v8, -1
+; RV32-NEXT: vmv.v.i v9, 0
+; RV32-NEXT: vmerge.vim v9, v9, 1, v0
+; RV32-NEXT: vmsgt.vi v0, v8, 0
+; RV32-NEXT: vmerge.vim v8, v9, -1, v0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: scmp_z64i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a0, 63
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT: vmsle.vi v0, v8, 0
+; RV64-NEXT: vmv.v.i v9, -1
+; RV64-NEXT: vsrl.vx v9, v8, a0, v0.t
+; RV64-NEXT: vmv.v.v v8, v9
+; RV64-NEXT: ret
entry:
%c = call <2 x i64> @llvm.scmp(<2 x i64> zeroinitializer, <2 x i64> %a)
ret <2 x i64> %c
}
define <2 x i64> @scmp_i64z64(<2 x i64> %a) {
-; CHECK-LABEL: scmp_i64z64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 0
-; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vmsle.vi v0, v8, -1
-; CHECK-NEXT: vmerge.vim v8, v9, -1, v0
-; CHECK-NEXT: ret
+; RV32-LABEL: scmp_i64z64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vmsgt.vi v0, v8, 0
+; RV32-NEXT: vmv.v.i v9, 0
+; RV32-NEXT: vmerge.vim v9, v9, 1, v0
+; RV32-NEXT: vmsle.vi v0, v8, -1
+; RV32-NEXT: vmerge.vim v8, v9, -1, v0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: scmp_i64z64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a0, 1
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vmin.vx v9, v8, a0
+; RV64-NEXT: li a0, 63
+; RV64-NEXT: vsra.vx v8, v8, a0
+; RV64-NEXT: vor.vv v8, v8, v9
+; RV64-NEXT: ret
entry:
%c = call <2 x i64> @llvm.scmp(<2 x i64> %a, <2 x i64> zeroinitializer)
ret <2 x i64> %c
}
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; RV32: {{.*}}
-; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/scmp.ll b/llvm/test/CodeGen/RISCV/rvv/scmp.ll
index aaac9c03bb2c0..1fb1962df80b7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/scmp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/scmp.ll
@@ -20,10 +20,11 @@ entry:
define <vscale x 16 x i8> @scmp_z8i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: scmp_z8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 0
-; CHECK-NEXT: vsrl.vi v8, v8, 7
-; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
+; CHECK-NEXT: vmsle.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v10, -1
+; CHECK-NEXT: vsrl.vi v10, v8, 7, v0.t
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
%c = call <vscale x 16 x i8> @llvm.scmp(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> %a)
@@ -33,12 +34,11 @@ entry:
define <vscale x 16 x i8> @scmp_i8z8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: scmp_i8z8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 0
-; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT: vmsle.vi v0, v8, -1
-; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmin.vx v10, v8, a0
+; CHECK-NEXT: vsra.vi v8, v8, 7
+; CHECK-NEXT: vor.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
%c = call <vscale x 16 x i8> @llvm.scmp(<vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer)
@@ -64,10 +64,11 @@ entry:
define <vscale x 8 x i16> @scmp_z16i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: scmp_z16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 0
-; CHECK-NEXT: vsrl.vi v8, v8, 15
-; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
+; CHECK-NEXT: vmsle.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v10, -1
+; CHECK-NEXT: vsrl.vi v10, v8, 15, v0.t
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
%c = call <vscale x 8 x i16> @llvm.scmp(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> %a)
@@ -77,12 +78,11 @@ entry:
define <vscale x 8 x i16> @scmp_i16z16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: scmp_i16z16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 0
-; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT: vmsle.vi v0, v8, -1
-; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmin.vx v10, v8, a0
+; CHECK-NEXT: vsra.vi v8, v8, 15
+; CHECK-NEXT: vor.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
%c = call <vscale x 8 x i16> @llvm.scmp(<vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer)
@@ -108,10 +108,11 @@ entry:
define <vscale x 4 x i32> @scmp_z32i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: scmp_z32i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT: vmsgt.vi v0, v8, 0
-; CHECK-NEXT: vsrl.vi v8, v8, 31
-; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT: vmsle.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v10, -1
+; CHECK-NEXT: vsrl.vi v10, v8, 31, v0.t
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
%c = call <vscale x 4 x i32> @llvm.scmp(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> %a)
@@ -119,23 +120,14 @@ entry:
}
define <vscale x 4 x i32> @scmp_i32z32(<vscale x 4 x i32> %a) {
-; RV32-LABEL: scmp_i32z32:
-; RV32: # %bb.0: # %entry
-; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu
-; RV32-NEXT: vmsgt.vi v0, v8, 0
-; RV32-NEXT: vsra.vi v8, v8, 31
-; RV32-NEXT: vor.vi v8, v8, 1, v0.t
-; RV32-NEXT: ret
-;
-; RV64-LABEL: scmp_i32z32:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; RV64-NEXT: vmsgt.vi v0, v8, 0
-; RV64-NEXT: vmv.v.i v10, 0
-; RV64-NEXT: vmerge.vim v10, v10, 1, v0
-; RV64-NEXT: vmsle.vi v0, v8, -1
-; RV64-NEXT: vmerge.vim v8, v10, -1, v0
-; RV64-NEXT: ret
+; CHECK-LABEL: scmp_i32z32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT: vmin.vx v10, v8, a0
+; CHECK-NEXT: vsra.vi v8, v8, 31
+; CHECK-NEXT: vor.vv v8, v8, v10
+; CHECK-NEXT: ret
entry:
%c = call <vscale x 4 x i32> @llvm.scmp(<vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer)
ret <vscale x 4 x i32> %c
@@ -171,10 +163,11 @@ define <vscale x 2 x i64> @scmp_z64i64(<vscale x 2 x i64> %a) {
; RV64-LABEL: scmp_z64i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: li a0, 63
-; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV64-NEXT: vmsgt.vi v0, v8, 0
-; RV64-NEXT: vsrl.vx v8, v8, a0
-; RV64-NEXT: vmerge.vim v8, v8, -1, v0
+; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; RV64-NEXT: vmsle.vi v0, v8, 0
+; RV64-NEXT: vmv.v.i v10, -1
+; RV64-NEXT: vsrl.vx v10, v8, a0, v0.t
+; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
entry:
%c = call <vscale x 2 x i64> @llvm.scmp(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> %a)
@@ -194,11 +187,12 @@ define <vscale x 2 x i64> @scmp_i64z64(<vscale x 2 x i64> %a) {
;
; RV64-LABEL: scmp_i64z64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a0, 1
+; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; RV64-NEXT: vmin.vx v10, v8, a0
; RV64-NEXT: li a0, 63
-; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
-; RV64-NEXT: vmsgt.vi v0, v8, 0
; RV64-NEXT: vsra.vx v8, v8, a0
-; RV64-NEXT: vor.vi v8, v8, 1, v0.t
+; RV64-NEXT: vor.vv v8, v8, v10
; RV64-NEXT: ret
entry:
%c = call <vscale x 2 x i64> @llvm.scmp(<vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer)