[llvm] 95075d3 - [RISCV][test] Add RV32I and RV64I RUN lines to condops.ll test
Alex Bradbury via llvm-commits
llvm-commits at lists.llvm.org
Fri Jul 14 05:30:06 PDT 2023
Author: Alex Bradbury
Date: 2023-07-14T13:29:40+01:00
New Revision: 95075d3d2c66ee63dbe697beacf57e8f43bb3997
URL: https://github.com/llvm/llvm-project/commit/95075d3d2c66ee63dbe697beacf57e8f43bb3997
DIFF: https://github.com/llvm/llvm-project/commit/95075d3d2c66ee63dbe697beacf57e8f43bb3997.diff
LOG: [RISCV][test] Add RV32I and RV64I RUN lines to condops.ll test
Some of these test cases will be changed by upcoming combines, even in
the non-zicond case.
Added:
Modified:
llvm/test/CodeGen/RISCV/condops.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/condops.ll b/llvm/test/CodeGen/RISCV/condops.ll
index f5907321078f72..e43fa4f9acc690 100644
--- a/llvm/test/CodeGen/RISCV/condops.ll
+++ b/llvm/test/CodeGen/RISCV/condops.ll
@@ -1,10 +1,25 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv32 < %s | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 < %s | FileCheck %s -check-prefix=RV64I
; RUN: llc -mtriple=riscv64 -mattr=+xventanacondops < %s | FileCheck %s -check-prefix=RV64XVENTANACONDOPS
; RUN: llc -mtriple=riscv64 -mattr=+xtheadcondmov < %s | FileCheck %s -check-prefix=RV64XTHEADCONDMOV
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zicond < %s | FileCheck %s -check-prefixes=RV32ZICOND
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zicond < %s | FileCheck %s -check-prefixes=RV64ZICOND
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zicond < %s | FileCheck %s -check-prefix=RV32ZICOND
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zicond < %s | FileCheck %s -check-prefix=RV64ZICOND
define i64 @zero1(i64 %rs1, i1 zeroext %rc) {
+; RV32I-LABEL: zero1:
+; RV32I: # %bb.0:
+; RV32I-NEXT: neg a2, a2
+; RV32I-NEXT: and a0, a2, a0
+; RV32I-NEXT: and a1, a2, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero1:
+; RV64I: # %bb.0:
+; RV64I-NEXT: neg a1, a1
+; RV64I-NEXT: and a0, a1, a0
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero1:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a0, a1
@@ -30,6 +45,19 @@ define i64 @zero1(i64 %rs1, i1 zeroext %rc) {
}
define i64 @zero2(i64 %rs1, i1 zeroext %rc) {
+; RV32I-LABEL: zero2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a2, a2, -1
+; RV32I-NEXT: and a0, a2, a0
+; RV32I-NEXT: and a1, a2, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a1, a1, -1
+; RV64I-NEXT: and a0, a1, a0
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero2:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a1
@@ -55,6 +83,24 @@ define i64 @zero2(i64 %rs1, i1 zeroext %rc) {
}
define i64 @add1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: add1:
+; RV32I: # %bb.0:
+; RV32I-NEXT: neg a0, a0
+; RV32I-NEXT: and a4, a0, a4
+; RV32I-NEXT: add a2, a2, a4
+; RV32I-NEXT: and a0, a0, a3
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: sltu a1, a0, a1
+; RV32I-NEXT: add a1, a2, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add1:
+; RV64I: # %bb.0:
+; RV64I-NEXT: neg a0, a0
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: add1:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
@@ -88,6 +134,24 @@ define i64 @add1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @add2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: add2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: neg a0, a0
+; RV32I-NEXT: and a2, a0, a2
+; RV32I-NEXT: add a2, a4, a2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: add a0, a3, a0
+; RV32I-NEXT: sltu a1, a0, a3
+; RV32I-NEXT: add a1, a2, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: neg a0, a0
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: add a0, a2, a0
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: add2:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
@@ -121,6 +185,24 @@ define i64 @add2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @add3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: add3:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a4, a0, a4
+; RV32I-NEXT: add a2, a2, a4
+; RV32I-NEXT: and a0, a0, a3
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: sltu a1, a0, a1
+; RV32I-NEXT: add a1, a2, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: add3:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
@@ -154,6 +236,24 @@ define i64 @add3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @add4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: add4:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a2, a0, a2
+; RV32I-NEXT: add a2, a4, a2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: add a0, a3, a0
+; RV32I-NEXT: sltu a1, a0, a3
+; RV32I-NEXT: add a1, a2, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add4:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: add a0, a2, a0
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: add4:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
@@ -187,6 +287,25 @@ define i64 @add4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @sub1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: sub1:
+; RV32I: # %bb.0:
+; RV32I-NEXT: neg a0, a0
+; RV32I-NEXT: and a3, a0, a3
+; RV32I-NEXT: sltu a5, a1, a3
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: sub a2, a2, a0
+; RV32I-NEXT: sub a2, a2, a5
+; RV32I-NEXT: sub a0, a1, a3
+; RV32I-NEXT: mv a1, a2
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: sub1:
+; RV64I: # %bb.0:
+; RV64I-NEXT: neg a0, a0
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: sub a0, a1, a0
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: sub1:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
@@ -221,6 +340,25 @@ define i64 @sub1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @sub2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: sub2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a3, a0, a3
+; RV32I-NEXT: sltu a5, a1, a3
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: sub a2, a2, a0
+; RV32I-NEXT: sub a2, a2, a5
+; RV32I-NEXT: sub a0, a1, a3
+; RV32I-NEXT: mv a1, a2
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: sub2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: sub a0, a1, a0
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: sub2:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
@@ -255,6 +393,22 @@ define i64 @sub2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @or1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: or1:
+; RV32I: # %bb.0:
+; RV32I-NEXT: neg a5, a0
+; RV32I-NEXT: and a0, a5, a3
+; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: and a1, a5, a4
+; RV32I-NEXT: or a1, a2, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: or1:
+; RV64I: # %bb.0:
+; RV64I-NEXT: neg a0, a0
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: or a0, a1, a0
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: or1:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
@@ -287,6 +441,22 @@ define i64 @or1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @or2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: or2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: neg a5, a0
+; RV32I-NEXT: and a0, a5, a1
+; RV32I-NEXT: or a0, a3, a0
+; RV32I-NEXT: and a1, a5, a2
+; RV32I-NEXT: or a1, a4, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: or2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: neg a0, a0
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: or a0, a2, a0
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: or2:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
@@ -319,6 +489,22 @@ define i64 @or2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @or3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: or3:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a5, a0, -1
+; RV32I-NEXT: and a0, a5, a3
+; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: and a1, a5, a4
+; RV32I-NEXT: or a1, a2, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: or3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: or a0, a1, a0
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: or3:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
@@ -351,6 +537,22 @@ define i64 @or3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @or4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: or4:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a5, a0, -1
+; RV32I-NEXT: and a0, a5, a1
+; RV32I-NEXT: or a0, a3, a0
+; RV32I-NEXT: and a1, a5, a2
+; RV32I-NEXT: or a1, a4, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: or4:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: or a0, a2, a0
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: or4:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
@@ -383,6 +585,22 @@ define i64 @or4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @xor1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: xor1:
+; RV32I: # %bb.0:
+; RV32I-NEXT: neg a5, a0
+; RV32I-NEXT: and a0, a5, a3
+; RV32I-NEXT: xor a0, a1, a0
+; RV32I-NEXT: and a1, a5, a4
+; RV32I-NEXT: xor a1, a2, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: xor1:
+; RV64I: # %bb.0:
+; RV64I-NEXT: neg a0, a0
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: xor a0, a1, a0
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: xor1:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
@@ -415,6 +633,22 @@ define i64 @xor1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @xor2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: xor2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: neg a5, a0
+; RV32I-NEXT: and a0, a5, a1
+; RV32I-NEXT: xor a0, a3, a0
+; RV32I-NEXT: and a1, a5, a2
+; RV32I-NEXT: xor a1, a4, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: xor2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: neg a0, a0
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: xor a0, a2, a0
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: xor2:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
@@ -447,6 +681,22 @@ define i64 @xor2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @xor3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: xor3:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a5, a0, -1
+; RV32I-NEXT: and a0, a5, a3
+; RV32I-NEXT: xor a0, a1, a0
+; RV32I-NEXT: and a1, a5, a4
+; RV32I-NEXT: xor a1, a2, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: xor3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: xor a0, a1, a0
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: xor3:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
@@ -479,6 +729,22 @@ define i64 @xor3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @xor4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: xor4:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a5, a0, -1
+; RV32I-NEXT: and a0, a5, a1
+; RV32I-NEXT: xor a0, a3, a0
+; RV32I-NEXT: and a1, a5, a2
+; RV32I-NEXT: xor a1, a4, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: xor4:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: xor a0, a2, a0
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: xor4:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
@@ -511,6 +777,26 @@ define i64 @xor4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @and1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: and1:
+; RV32I: # %bb.0:
+; RV32I-NEXT: beqz a0, .LBB16_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: and a2, a2, a4
+; RV32I-NEXT: and a1, a1, a3
+; RV32I-NEXT: .LBB16_2:
+; RV32I-NEXT: mv a0, a1
+; RV32I-NEXT: mv a1, a2
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: and1:
+; RV64I: # %bb.0:
+; RV64I-NEXT: beqz a0, .LBB16_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: .LBB16_2:
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: and1:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
@@ -548,6 +834,26 @@ define i64 @and1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @and2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: and2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: beqz a0, .LBB17_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: and a4, a2, a4
+; RV32I-NEXT: and a3, a1, a3
+; RV32I-NEXT: .LBB17_2:
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: mv a1, a4
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: and2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: beqz a0, .LBB17_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: and a2, a1, a2
+; RV64I-NEXT: .LBB17_2:
+; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: and2:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0
@@ -585,6 +891,26 @@ define i64 @and2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @and3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: and3:
+; RV32I: # %bb.0:
+; RV32I-NEXT: bnez a0, .LBB18_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: and a2, a2, a4
+; RV32I-NEXT: and a1, a1, a3
+; RV32I-NEXT: .LBB18_2:
+; RV32I-NEXT: mv a0, a1
+; RV32I-NEXT: mv a1, a2
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: and3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: bnez a0, .LBB18_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: .LBB18_2:
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: and3:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
@@ -622,6 +948,26 @@ define i64 @and3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @and4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: and4:
+; RV32I: # %bb.0:
+; RV32I-NEXT: bnez a0, .LBB19_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: and a4, a2, a4
+; RV32I-NEXT: and a3, a1, a3
+; RV32I-NEXT: .LBB19_2:
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: mv a1, a4
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: and4:
+; RV64I: # %bb.0:
+; RV64I-NEXT: bnez a0, .LBB19_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: and a2, a1, a2
+; RV64I-NEXT: .LBB19_2:
+; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: and4:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0
@@ -659,6 +1005,26 @@ define i64 @and4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @basic(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: basic:
+; RV32I: # %bb.0:
+; RV32I-NEXT: bnez a0, .LBB20_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: mv a1, a3
+; RV32I-NEXT: mv a2, a4
+; RV32I-NEXT: .LBB20_2:
+; RV32I-NEXT: mv a0, a1
+; RV32I-NEXT: mv a1, a2
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: basic:
+; RV64I: # %bb.0:
+; RV64I-NEXT: bnez a0, .LBB20_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a1, a2
+; RV64I-NEXT: .LBB20_2:
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: basic:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a2, a2, a0
@@ -694,6 +1060,29 @@ define i64 @basic(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
}
define i64 @seteq(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: seteq:
+; RV32I: # %bb.0:
+; RV32I-NEXT: xor a1, a1, a3
+; RV32I-NEXT: xor a0, a0, a2
+; RV32I-NEXT: or a2, a0, a1
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: beqz a2, .LBB21_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: mv a0, a6
+; RV32I-NEXT: mv a1, a7
+; RV32I-NEXT: .LBB21_2:
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: seteq:
+; RV64I: # %bb.0:
+; RV64I-NEXT: beq a0, a1, .LBB21_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a2, a3
+; RV64I-NEXT: .LBB21_2:
+; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: seteq:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1
@@ -735,6 +1124,29 @@ define i64 @seteq(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
}
define i64 @setne(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: setne:
+; RV32I: # %bb.0:
+; RV32I-NEXT: xor a1, a1, a3
+; RV32I-NEXT: xor a0, a0, a2
+; RV32I-NEXT: or a2, a0, a1
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: bnez a2, .LBB22_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: mv a0, a6
+; RV32I-NEXT: mv a1, a7
+; RV32I-NEXT: .LBB22_2:
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: setne:
+; RV64I: # %bb.0:
+; RV64I-NEXT: bne a0, a1, .LBB22_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a2, a3
+; RV64I-NEXT: .LBB22_2:
+; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: setne:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1
@@ -776,6 +1188,33 @@ define i64 @setne(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
}
define i64 @setgt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: setgt:
+; RV32I: # %bb.0:
+; RV32I-NEXT: beq a1, a3, .LBB23_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: slt a0, a3, a1
+; RV32I-NEXT: beqz a0, .LBB23_3
+; RV32I-NEXT: j .LBB23_4
+; RV32I-NEXT: .LBB23_2:
+; RV32I-NEXT: sltu a0, a2, a0
+; RV32I-NEXT: bnez a0, .LBB23_4
+; RV32I-NEXT: .LBB23_3:
+; RV32I-NEXT: mv a4, a6
+; RV32I-NEXT: mv a5, a7
+; RV32I-NEXT: .LBB23_4:
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: setgt:
+; RV64I: # %bb.0:
+; RV64I-NEXT: blt a1, a0, .LBB23_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a2, a3
+; RV64I-NEXT: .LBB23_2:
+; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: setgt:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: slt a0, a1, a0
@@ -820,6 +1259,35 @@ define i64 @setgt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
}
define i64 @setge(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: setge:
+; RV32I: # %bb.0:
+; RV32I-NEXT: beq a1, a3, .LBB24_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: slt a0, a1, a3
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: beqz a0, .LBB24_3
+; RV32I-NEXT: j .LBB24_4
+; RV32I-NEXT: .LBB24_2:
+; RV32I-NEXT: sltu a0, a0, a2
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: bnez a0, .LBB24_4
+; RV32I-NEXT: .LBB24_3:
+; RV32I-NEXT: mv a4, a6
+; RV32I-NEXT: mv a5, a7
+; RV32I-NEXT: .LBB24_4:
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: setge:
+; RV64I: # %bb.0:
+; RV64I-NEXT: bge a0, a1, .LBB24_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a2, a3
+; RV64I-NEXT: .LBB24_2:
+; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: setge:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: slt a0, a0, a1
@@ -867,6 +1335,33 @@ define i64 @setge(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
}
define i64 @setlt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: setlt:
+; RV32I: # %bb.0:
+; RV32I-NEXT: beq a1, a3, .LBB25_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: slt a0, a1, a3
+; RV32I-NEXT: beqz a0, .LBB25_3
+; RV32I-NEXT: j .LBB25_4
+; RV32I-NEXT: .LBB25_2:
+; RV32I-NEXT: sltu a0, a0, a2
+; RV32I-NEXT: bnez a0, .LBB25_4
+; RV32I-NEXT: .LBB25_3:
+; RV32I-NEXT: mv a4, a6
+; RV32I-NEXT: mv a5, a7
+; RV32I-NEXT: .LBB25_4:
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: setlt:
+; RV64I: # %bb.0:
+; RV64I-NEXT: blt a0, a1, .LBB25_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a2, a3
+; RV64I-NEXT: .LBB25_2:
+; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: setlt:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: slt a0, a0, a1
@@ -911,6 +1406,35 @@ define i64 @setlt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
}
define i64 @setle(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: setle:
+; RV32I: # %bb.0:
+; RV32I-NEXT: beq a1, a3, .LBB26_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: slt a0, a3, a1
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: beqz a0, .LBB26_3
+; RV32I-NEXT: j .LBB26_4
+; RV32I-NEXT: .LBB26_2:
+; RV32I-NEXT: sltu a0, a2, a0
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: bnez a0, .LBB26_4
+; RV32I-NEXT: .LBB26_3:
+; RV32I-NEXT: mv a4, a6
+; RV32I-NEXT: mv a5, a7
+; RV32I-NEXT: .LBB26_4:
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: setle:
+; RV64I: # %bb.0:
+; RV64I-NEXT: bge a1, a0, .LBB26_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a2, a3
+; RV64I-NEXT: .LBB26_2:
+; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: setle:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: slt a0, a1, a0
@@ -958,6 +1482,33 @@ define i64 @setle(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
}
define i64 @setugt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: setugt:
+; RV32I: # %bb.0:
+; RV32I-NEXT: beq a1, a3, .LBB27_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: sltu a0, a3, a1
+; RV32I-NEXT: beqz a0, .LBB27_3
+; RV32I-NEXT: j .LBB27_4
+; RV32I-NEXT: .LBB27_2:
+; RV32I-NEXT: sltu a0, a2, a0
+; RV32I-NEXT: bnez a0, .LBB27_4
+; RV32I-NEXT: .LBB27_3:
+; RV32I-NEXT: mv a4, a6
+; RV32I-NEXT: mv a5, a7
+; RV32I-NEXT: .LBB27_4:
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: setugt:
+; RV64I: # %bb.0:
+; RV64I-NEXT: bltu a1, a0, .LBB27_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a2, a3
+; RV64I-NEXT: .LBB27_2:
+; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: setugt:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: sltu a0, a1, a0
@@ -1002,6 +1553,35 @@ define i64 @setugt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
}
define i64 @setuge(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: setuge:
+; RV32I: # %bb.0:
+; RV32I-NEXT: beq a1, a3, .LBB28_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: sltu a0, a1, a3
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: beqz a0, .LBB28_3
+; RV32I-NEXT: j .LBB28_4
+; RV32I-NEXT: .LBB28_2:
+; RV32I-NEXT: sltu a0, a0, a2
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: bnez a0, .LBB28_4
+; RV32I-NEXT: .LBB28_3:
+; RV32I-NEXT: mv a4, a6
+; RV32I-NEXT: mv a5, a7
+; RV32I-NEXT: .LBB28_4:
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: setuge:
+; RV64I: # %bb.0:
+; RV64I-NEXT: bgeu a0, a1, .LBB28_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a2, a3
+; RV64I-NEXT: .LBB28_2:
+; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: setuge:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: sltu a0, a0, a1
@@ -1049,6 +1629,33 @@ define i64 @setuge(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
}
define i64 @setult(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: setult:
+; RV32I: # %bb.0:
+; RV32I-NEXT: beq a1, a3, .LBB29_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: sltu a0, a1, a3
+; RV32I-NEXT: beqz a0, .LBB29_3
+; RV32I-NEXT: j .LBB29_4
+; RV32I-NEXT: .LBB29_2:
+; RV32I-NEXT: sltu a0, a0, a2
+; RV32I-NEXT: bnez a0, .LBB29_4
+; RV32I-NEXT: .LBB29_3:
+; RV32I-NEXT: mv a4, a6
+; RV32I-NEXT: mv a5, a7
+; RV32I-NEXT: .LBB29_4:
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: setult:
+; RV64I: # %bb.0:
+; RV64I-NEXT: bltu a0, a1, .LBB29_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a2, a3
+; RV64I-NEXT: .LBB29_2:
+; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: setult:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: sltu a0, a0, a1
@@ -1093,6 +1700,35 @@ define i64 @setult(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
}
define i64 @setule(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: setule:
+; RV32I: # %bb.0:
+; RV32I-NEXT: beq a1, a3, .LBB30_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: sltu a0, a3, a1
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: beqz a0, .LBB30_3
+; RV32I-NEXT: j .LBB30_4
+; RV32I-NEXT: .LBB30_2:
+; RV32I-NEXT: sltu a0, a2, a0
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: bnez a0, .LBB30_4
+; RV32I-NEXT: .LBB30_3:
+; RV32I-NEXT: mv a4, a6
+; RV32I-NEXT: mv a5, a7
+; RV32I-NEXT: .LBB30_4:
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: setule:
+; RV64I: # %bb.0:
+; RV64I-NEXT: bgeu a1, a0, .LBB30_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a2, a3
+; RV64I-NEXT: .LBB30_2:
+; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: setule:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: sltu a0, a1, a0
@@ -1140,6 +1776,27 @@ define i64 @setule(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
}
define i64 @seteq_zero(i64 %a, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: seteq_zero:
+; RV32I: # %bb.0:
+; RV32I-NEXT: or a6, a0, a1
+; RV32I-NEXT: mv a1, a3
+; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: beqz a6, .LBB31_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: .LBB31_2:
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: seteq_zero:
+; RV64I: # %bb.0:
+; RV64I-NEXT: beqz a0, .LBB31_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a1, a2
+; RV64I-NEXT: .LBB31_2:
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: seteq_zero:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a1, a1, a0
@@ -1176,6 +1833,27 @@ define i64 @seteq_zero(i64 %a, i64 %rs1, i64 %rs2) {
}
define i64 @setne_zero(i64 %a, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: setne_zero:
+; RV32I: # %bb.0:
+; RV32I-NEXT: or a6, a0, a1
+; RV32I-NEXT: mv a1, a3
+; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: bnez a6, .LBB32_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: .LBB32_2:
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: setne_zero:
+; RV64I: # %bb.0:
+; RV64I-NEXT: bnez a0, .LBB32_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a1, a2
+; RV64I-NEXT: .LBB32_2:
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: setne_zero:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a2, a2, a0
@@ -1212,6 +1890,29 @@ define i64 @setne_zero(i64 %a, i64 %rs1, i64 %rs2) {
}
define i64 @seteq_constant(i64 %a, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: seteq_constant:
+; RV32I: # %bb.0:
+; RV32I-NEXT: xori a0, a0, 123
+; RV32I-NEXT: or a6, a0, a1
+; RV32I-NEXT: mv a1, a3
+; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: beqz a6, .LBB33_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: .LBB33_2:
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: seteq_constant:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a3, 123
+; RV64I-NEXT: beq a0, a3, .LBB33_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a1, a2
+; RV64I-NEXT: .LBB33_2:
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: seteq_constant:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: addi a0, a0, -123
@@ -1252,6 +1953,29 @@ define i64 @seteq_constant(i64 %a, i64 %rs1, i64 %rs2) {
}
define i64 @setne_constant(i64 %a, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: setne_constant:
+; RV32I: # %bb.0:
+; RV32I-NEXT: xori a0, a0, 456
+; RV32I-NEXT: or a6, a0, a1
+; RV32I-NEXT: mv a1, a3
+; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: bnez a6, .LBB34_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: .LBB34_2:
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: setne_constant:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a3, 456
+; RV64I-NEXT: bne a0, a3, .LBB34_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a1, a2
+; RV64I-NEXT: .LBB34_2:
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: setne_constant:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: addi a0, a0, -456
@@ -1292,6 +2016,32 @@ define i64 @setne_constant(i64 %a, i64 %rs1, i64 %rs2) {
}
define i64 @seteq_2048(i64 %a, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: seteq_2048:
+; RV32I: # %bb.0:
+; RV32I-NEXT: li a6, 1
+; RV32I-NEXT: slli a6, a6, 11
+; RV32I-NEXT: xor a0, a0, a6
+; RV32I-NEXT: or a6, a0, a1
+; RV32I-NEXT: mv a1, a3
+; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: beqz a6, .LBB35_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: .LBB35_2:
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: seteq_2048:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a3, 1
+; RV64I-NEXT: slli a3, a3, 11
+; RV64I-NEXT: beq a0, a3, .LBB35_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a1, a2
+; RV64I-NEXT: .LBB35_2:
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: seteq_2048:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: addi a0, a0, -2048
@@ -1334,6 +2084,30 @@ define i64 @seteq_2048(i64 %a, i64 %rs1, i64 %rs2) {
}
define i64 @seteq_neg2048(i64 %a, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: seteq_neg2048:
+; RV32I: # %bb.0:
+; RV32I-NEXT: not a1, a1
+; RV32I-NEXT: xori a0, a0, -2048
+; RV32I-NEXT: or a6, a0, a1
+; RV32I-NEXT: mv a1, a3
+; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: beqz a6, .LBB36_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: .LBB36_2:
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: seteq_neg2048:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a3, -2048
+; RV64I-NEXT: beq a0, a3, .LBB36_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a1, a2
+; RV64I-NEXT: .LBB36_2:
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: seteq_neg2048:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048
@@ -1375,6 +2149,30 @@ define i64 @seteq_neg2048(i64 %a, i64 %rs1, i64 %rs2) {
}
define i64 @setne_neg2048(i64 %a, i64 %rs1, i64 %rs2) {
+; RV32I-LABEL: setne_neg2048:
+; RV32I: # %bb.0:
+; RV32I-NEXT: not a1, a1
+; RV32I-NEXT: xori a0, a0, -2048
+; RV32I-NEXT: or a6, a0, a1
+; RV32I-NEXT: mv a1, a3
+; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: bnez a6, .LBB37_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: mv a1, a5
+; RV32I-NEXT: .LBB37_2:
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: setne_neg2048:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a3, -2048
+; RV64I-NEXT: bne a0, a3, .LBB37_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a1, a2
+; RV64I-NEXT: .LBB37_2:
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: setne_neg2048:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048
@@ -1416,6 +2214,25 @@ define i64 @setne_neg2048(i64 %a, i64 %rs1, i64 %rs2) {
}
define i64 @zero1_seteq(i64 %a, i64 %b, i64 %rs1) {
+; RV32I-LABEL: zero1_seteq:
+; RV32I: # %bb.0:
+; RV32I-NEXT: xor a1, a1, a3
+; RV32I-NEXT: xor a0, a0, a2
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: snez a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a4
+; RV32I-NEXT: and a1, a1, a5
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero1_seteq:
+; RV64I: # %bb.0:
+; RV64I-NEXT: xor a0, a0, a1
+; RV64I-NEXT: snez a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero1_seteq:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1
@@ -1449,6 +2266,25 @@ define i64 @zero1_seteq(i64 %a, i64 %b, i64 %rs1) {
}
define i64 @zero2_seteq(i64 %a, i64 %b, i64 %rs1) {
+; RV32I-LABEL: zero2_seteq:
+; RV32I: # %bb.0:
+; RV32I-NEXT: xor a1, a1, a3
+; RV32I-NEXT: xor a0, a0, a2
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: seqz a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a4
+; RV32I-NEXT: and a1, a1, a5
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero2_seteq:
+; RV64I: # %bb.0:
+; RV64I-NEXT: xor a0, a0, a1
+; RV64I-NEXT: seqz a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero2_seteq:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1
@@ -1482,6 +2318,25 @@ define i64 @zero2_seteq(i64 %a, i64 %b, i64 %rs1) {
}
define i64 @zero1_setne(i64 %a, i64 %b, i64 %rs1) {
+; RV32I-LABEL: zero1_setne:
+; RV32I: # %bb.0:
+; RV32I-NEXT: xor a1, a1, a3
+; RV32I-NEXT: xor a0, a0, a2
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: seqz a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a4
+; RV32I-NEXT: and a1, a1, a5
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero1_setne:
+; RV64I: # %bb.0:
+; RV64I-NEXT: xor a0, a0, a1
+; RV64I-NEXT: seqz a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero1_setne:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1
@@ -1515,6 +2370,25 @@ define i64 @zero1_setne(i64 %a, i64 %b, i64 %rs1) {
}
define i64 @zero2_setne(i64 %a, i64 %b, i64 %rs1) {
+; RV32I-LABEL: zero2_setne:
+; RV32I: # %bb.0:
+; RV32I-NEXT: xor a1, a1, a3
+; RV32I-NEXT: xor a0, a0, a2
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: snez a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a4
+; RV32I-NEXT: and a1, a1, a5
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero2_setne:
+; RV64I: # %bb.0:
+; RV64I-NEXT: xor a0, a0, a1
+; RV64I-NEXT: snez a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero2_setne:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1
@@ -1548,6 +2422,22 @@ define i64 @zero2_setne(i64 %a, i64 %b, i64 %rs1) {
}
define i64 @zero1_seteq_zero(i64 %a, i64 %rs1) {
+; RV32I-LABEL: zero1_seteq_zero:
+; RV32I: # %bb.0:
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: snez a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a2
+; RV32I-NEXT: and a1, a1, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero1_seteq_zero:
+; RV64I: # %bb.0:
+; RV64I-NEXT: snez a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero1_seteq_zero:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
@@ -1576,6 +2466,22 @@ define i64 @zero1_seteq_zero(i64 %a, i64 %rs1) {
}
define i64 @zero2_seteq_zero(i64 %a, i64 %rs1) {
+; RV32I-LABEL: zero2_seteq_zero:
+; RV32I: # %bb.0:
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: seqz a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a2
+; RV32I-NEXT: and a1, a1, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero2_seteq_zero:
+; RV64I: # %bb.0:
+; RV64I-NEXT: seqz a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero2_seteq_zero:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
@@ -1604,6 +2510,22 @@ define i64 @zero2_seteq_zero(i64 %a, i64 %rs1) {
}
define i64 @zero1_setne_zero(i64 %a, i64 %rs1) {
+; RV32I-LABEL: zero1_setne_zero:
+; RV32I: # %bb.0:
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: seqz a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a2
+; RV32I-NEXT: and a1, a1, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero1_setne_zero:
+; RV64I: # %bb.0:
+; RV64I-NEXT: seqz a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero1_setne_zero:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0
@@ -1632,6 +2554,22 @@ define i64 @zero1_setne_zero(i64 %a, i64 %rs1) {
}
define i64 @zero2_setne_zero(i64 %a, i64 %rs1) {
+; RV32I-LABEL: zero2_setne_zero:
+; RV32I: # %bb.0:
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: snez a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a2
+; RV32I-NEXT: and a1, a1, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero2_setne_zero:
+; RV64I: # %bb.0:
+; RV64I-NEXT: snez a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero2_setne_zero:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0
@@ -1660,6 +2598,25 @@ define i64 @zero2_setne_zero(i64 %a, i64 %rs1) {
}
define i64 @zero1_seteq_constant(i64 %a, i64 %rs1) {
+; RV32I-LABEL: zero1_seteq_constant:
+; RV32I: # %bb.0:
+; RV32I-NEXT: not a1, a1
+; RV32I-NEXT: xori a0, a0, -231
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: snez a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a2
+; RV32I-NEXT: and a1, a1, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero1_seteq_constant:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, 231
+; RV64I-NEXT: snez a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero1_seteq_constant:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: addi a0, a0, 231
@@ -1693,6 +2650,24 @@ define i64 @zero1_seteq_constant(i64 %a, i64 %rs1) {
}
define i64 @zero2_seteq_constant(i64 %a, i64 %rs1) {
+; RV32I-LABEL: zero2_seteq_constant:
+; RV32I: # %bb.0:
+; RV32I-NEXT: xori a0, a0, 546
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: seqz a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a2
+; RV32I-NEXT: and a1, a1, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero2_seteq_constant:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, -546
+; RV64I-NEXT: seqz a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero2_seteq_constant:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: addi a0, a0, -546
@@ -1725,6 +2700,24 @@ define i64 @zero2_seteq_constant(i64 %a, i64 %rs1) {
}
define i64 @zero1_setne_constant(i64 %a, i64 %rs1) {
+; RV32I-LABEL: zero1_setne_constant:
+; RV32I: # %bb.0:
+; RV32I-NEXT: xori a0, a0, 321
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: seqz a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a2
+; RV32I-NEXT: and a1, a1, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero1_setne_constant:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, -321
+; RV64I-NEXT: seqz a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero1_setne_constant:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: addi a0, a0, -321
@@ -1757,6 +2750,25 @@ define i64 @zero1_setne_constant(i64 %a, i64 %rs1) {
}
define i64 @zero2_setne_constant(i64 %a, i64 %rs1) {
+; RV32I-LABEL: zero2_setne_constant:
+; RV32I: # %bb.0:
+; RV32I-NEXT: not a1, a1
+; RV32I-NEXT: xori a0, a0, -654
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: snez a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a2
+; RV32I-NEXT: and a1, a1, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero2_setne_constant:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, 654
+; RV64I-NEXT: snez a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero2_setne_constant:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: addi a0, a0, 654
@@ -1790,6 +2802,25 @@ define i64 @zero2_setne_constant(i64 %a, i64 %rs1) {
}
define i64 @zero1_seteq_neg2048(i64 %a, i64 %rs1) {
+; RV32I-LABEL: zero1_seteq_neg2048:
+; RV32I: # %bb.0:
+; RV32I-NEXT: not a1, a1
+; RV32I-NEXT: xori a0, a0, -2048
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: snez a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a2
+; RV32I-NEXT: and a1, a1, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero1_seteq_neg2048:
+; RV64I: # %bb.0:
+; RV64I-NEXT: xori a0, a0, -2048
+; RV64I-NEXT: snez a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero1_seteq_neg2048:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048
@@ -1823,6 +2854,25 @@ define i64 @zero1_seteq_neg2048(i64 %a, i64 %rs1) {
}
define i64 @zero2_seteq_neg2048(i64 %a, i64 %rs1) {
+; RV32I-LABEL: zero2_seteq_neg2048:
+; RV32I: # %bb.0:
+; RV32I-NEXT: not a1, a1
+; RV32I-NEXT: xori a0, a0, -2048
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: seqz a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a2
+; RV32I-NEXT: and a1, a1, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero2_seteq_neg2048:
+; RV64I: # %bb.0:
+; RV64I-NEXT: xori a0, a0, -2048
+; RV64I-NEXT: seqz a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero2_seteq_neg2048:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048
@@ -1856,6 +2906,25 @@ define i64 @zero2_seteq_neg2048(i64 %a, i64 %rs1) {
}
define i64 @zero1_setne_neg2048(i64 %a, i64 %rs1) {
+; RV32I-LABEL: zero1_setne_neg2048:
+; RV32I: # %bb.0:
+; RV32I-NEXT: not a1, a1
+; RV32I-NEXT: xori a0, a0, -2048
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: seqz a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a2
+; RV32I-NEXT: and a1, a1, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero1_setne_neg2048:
+; RV64I: # %bb.0:
+; RV64I-NEXT: xori a0, a0, -2048
+; RV64I-NEXT: seqz a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero1_setne_neg2048:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048
@@ -1889,6 +2958,25 @@ define i64 @zero1_setne_neg2048(i64 %a, i64 %rs1) {
}
define i64 @zero2_setne_neg2048(i64 %a, i64 %rs1) {
+; RV32I-LABEL: zero2_setne_neg2048:
+; RV32I: # %bb.0:
+; RV32I-NEXT: not a1, a1
+; RV32I-NEXT: xori a0, a0, -2048
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: snez a0, a0
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: and a0, a1, a2
+; RV32I-NEXT: and a1, a1, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: zero2_setne_neg2048:
+; RV64I: # %bb.0:
+; RV64I-NEXT: xori a0, a0, -2048
+; RV64I-NEXT: snez a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: zero2_setne_neg2048:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048
@@ -1921,8 +3009,53 @@ define i64 @zero2_setne_neg2048(i64 %a, i64 %rs1) {
ret i64 %sel
}
-; Test that we are able to convert the sext.w int he loop to mv.
define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nounwind {
+; RV32I-LABEL: sextw_removal_maskc:
+; RV32I: # %bb.0: # %bb
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT: mv s0, a2
+; RV32I-NEXT: slli a0, a0, 31
+; RV32I-NEXT: srai a0, a0, 31
+; RV32I-NEXT: and s1, a0, a1
+; RV32I-NEXT: .LBB54_1: # %bb2
+; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT: mv a0, s1
+; RV32I-NEXT: call bar@plt
+; RV32I-NEXT: sll s1, s1, s0
+; RV32I-NEXT: bnez a0, .LBB54_1
+; RV32I-NEXT: # %bb.2: # %bb7
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: sextw_removal_maskc:
+; RV64I: # %bb.0: # %bb
+; RV64I-NEXT: addi sp, sp, -32
+; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: mv s0, a2
+; RV64I-NEXT: slli a0, a0, 63
+; RV64I-NEXT: srai a0, a0, 63
+; RV64I-NEXT: and s1, a0, a1
+; RV64I-NEXT: .LBB54_1: # %bb2
+; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT: mv a0, s1
+; RV64I-NEXT: call bar@plt
+; RV64I-NEXT: sllw s1, s1, s0
+; RV64I-NEXT: bnez a0, .LBB54_1
+; RV64I-NEXT: # %bb.2: # %bb7
+; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 32
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: sextw_removal_maskc:
; RV64XVENTANACONDOPS: # %bb.0: # %bb
; RV64XVENTANACONDOPS-NEXT: addi sp, sp, -32
@@ -2028,6 +3161,52 @@ bb7: ; preds = %bb2
declare signext i32 @bar(i32 signext)
define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) nounwind {
+; RV32I-LABEL: sextw_removal_maskcn:
+; RV32I: # %bb.0: # %bb
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT: mv s0, a2
+; RV32I-NEXT: andi a0, a0, 1
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and s1, a0, a1
+; RV32I-NEXT: .LBB55_1: # %bb2
+; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT: mv a0, s1
+; RV32I-NEXT: call bar@plt
+; RV32I-NEXT: sll s1, s1, s0
+; RV32I-NEXT: bnez a0, .LBB55_1
+; RV32I-NEXT: # %bb.2: # %bb7
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: sextw_removal_maskcn:
+; RV64I: # %bb.0: # %bb
+; RV64I-NEXT: addi sp, sp, -32
+; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: mv s0, a2
+; RV64I-NEXT: andi a0, a0, 1
+; RV64I-NEXT: addiw a0, a0, -1
+; RV64I-NEXT: and s1, a0, a1
+; RV64I-NEXT: .LBB55_1: # %bb2
+; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT: mv a0, s1
+; RV64I-NEXT: call bar@plt
+; RV64I-NEXT: sllw s1, s1, s0
+; RV64I-NEXT: bnez a0, .LBB55_1
+; RV64I-NEXT: # %bb.2: # %bb7
+; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 32
+; RV64I-NEXT: ret
+;
; RV64XVENTANACONDOPS-LABEL: sextw_removal_maskcn:
; RV64XVENTANACONDOPS: # %bb.0: # %bb
; RV64XVENTANACONDOPS-NEXT: addi sp, sp, -32
More information about the llvm-commits
mailing list