[llvm] cb15e65 - [RISCV] A test for conditional binary ops.

Mikhail Gudim via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 28 08:10:35 PDT 2023


Author: Mikhail Gudim
Date: 2023-07-28T11:08:02-04:00
New Revision: cb15e657b5ef5682a9fbda8d881d12cf8d8da79e

URL: https://github.com/llvm/llvm-project/commit/cb15e657b5ef5682a9fbda8d881d12cf8d8da79e
DIFF: https://github.com/llvm/llvm-project/commit/cb15e657b5ef5682a9fbda8d881d12cf8d8da79e.diff

LOG: [RISCV] A test for conditional binary ops.

Consider the following pattern:

```
%binop_ = binop %x, %y
%select_ = select %c, %binop_, %x
```

If `binop` has an identity element `%identity`, the code above can be
transformed to:
```
%operand = select %c, %y, %identity
%result = binop %x, %operand
```
This transformation is profitable when `%identity` is all zeros (`add`,
`sub`, `or`, `xor`, and the shifts) or all ones (`and`).
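
As a concrete illustration, take `binop = add`, whose identity is zero
(the value names below are only for exposition, not taken from the test
file):
```
%binop = add i32 %x, %y
%select_ = select i1 %c, i32 %binop, i32 %x
```
becomes
```
%operand = select i1 %c, i32 %y, i32 0
%result = add i32 %x, %operand
```
In the baseline output checked in below, `add`, `sub`, `or`, and `xor`
already lower this way (a sign-extended mask on plain RV32I/RV64I, or
`vt.maskc`/`czero.eqz` with the conditional-ops extensions), while the
shift and `and` cases still use a branch or a conditional-zero merge.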

This patch adds a test for such patterns.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D155481

Added: 
    llvm/test/CodeGen/RISCV/condbinops.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/RISCV/condbinops.ll b/llvm/test/CodeGen/RISCV/condbinops.ll
new file mode 100644
index 00000000000000..5d655ca0ae108e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/condbinops.ll
@@ -0,0 +1,976 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv32 < %s | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 < %s | FileCheck %s -check-prefix=RV64I
+; RUN: llc -mtriple=riscv64 -mattr=+xventanacondops < %s | FileCheck %s -check-prefix=RV64XVENTANACONDOPS
+; RUN: llc -mtriple=riscv64 -mattr=+xtheadcondmov < %s | FileCheck %s -check-prefix=RV64XTHEADCONDMOV
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zicond < %s | FileCheck %s -check-prefix=RV32ZICOND
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zicond < %s | FileCheck %s -check-prefix=RV64ZICOND
+
+define i32 @shl32(i32 %x, i32 %y, i1 %c) {
+; RV32I-LABEL: shl32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a2, a2, 1
+; RV32I-NEXT:    beqz a2, .LBB0_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:  .LBB0_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a2, a2, 1
+; RV64I-NEXT:    beqz a2, .LBB0_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:  .LBB0_2:
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: shl32:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    andi a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    sllw a1, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: shl32:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    andi a2, a2, 1
+; RV64XTHEADCONDMOV-NEXT:    sllw a1, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: shl32:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    andi a2, a2, 1
+; RV32ZICOND-NEXT:    sll a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a0, a2
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV32ZICOND-NEXT:    or a0, a1, a0
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: shl32:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    andi a2, a2, 1
+; RV64ZICOND-NEXT:    sllw a1, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a0, a0, a2
+; RV64ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %binop = shl i32 %x, %y
+  %select_ = select i1 %c, i32 %binop, i32 %x
+  ret i32 %select_
+}
+
+define i32 @ashr32(i32 %x, i32 %y, i1 %c) {
+; RV32I-LABEL: ashr32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a2, a2, 1
+; RV32I-NEXT:    beqz a2, .LBB1_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sra a0, a0, a1
+; RV32I-NEXT:  .LBB1_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: ashr32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a2, a2, 1
+; RV64I-NEXT:    beqz a2, .LBB1_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:  .LBB1_2:
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: ashr32:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    andi a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    sraw a1, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: ashr32:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    andi a2, a2, 1
+; RV64XTHEADCONDMOV-NEXT:    sraw a1, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: ashr32:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    andi a2, a2, 1
+; RV32ZICOND-NEXT:    sra a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a0, a2
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV32ZICOND-NEXT:    or a0, a1, a0
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: ashr32:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    andi a2, a2, 1
+; RV64ZICOND-NEXT:    sraw a1, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a0, a0, a2
+; RV64ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %binop = ashr i32 %x, %y
+  %select_ = select i1 %c, i32 %binop, i32 %x
+  ret i32 %select_
+}
+
+define i32 @lshr32(i32 %x, i32 %y, i1 %c) {
+; RV32I-LABEL: lshr32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a2, a2, 1
+; RV32I-NEXT:    beqz a2, .LBB2_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    srl a0, a0, a1
+; RV32I-NEXT:  .LBB2_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: lshr32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a2, a2, 1
+; RV64I-NEXT:    beqz a2, .LBB2_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:  .LBB2_2:
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: lshr32:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    andi a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    srlw a1, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: lshr32:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    andi a2, a2, 1
+; RV64XTHEADCONDMOV-NEXT:    srlw a1, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: lshr32:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    andi a2, a2, 1
+; RV32ZICOND-NEXT:    srl a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a0, a2
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV32ZICOND-NEXT:    or a0, a1, a0
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: lshr32:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    andi a2, a2, 1
+; RV64ZICOND-NEXT:    srlw a1, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a0, a0, a2
+; RV64ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %binop = lshr i32 %x, %y
+  %select_ = select i1 %c, i32 %binop, i32 %x
+  ret i32 %select_
+}
+
+define i32 @sub32(i32 %x, i32 %y, i1 %c) {
+; RV32I-LABEL: sub32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a2, 31
+; RV32I-NEXT:    srai a2, a2, 31
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sub32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    srai a2, a2, 63
+; RV64I-NEXT:    and a1, a2, a1
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: sub32:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    andi a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    subw a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: sub32:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    andi a2, a2, 1
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a2
+; RV64XTHEADCONDMOV-NEXT:    subw a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: sub32:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    andi a2, a2, 1
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV32ZICOND-NEXT:    sub a0, a0, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: sub32:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    andi a2, a2, 1
+; RV64ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64ZICOND-NEXT:    subw a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %binop = sub i32 %x, %y
+  %select_ = select i1 %c, i32 %binop, i32 %x
+  ret i32 %select_
+}
+
+define i32 @and32(i32 %x, i32 %y, i1 %c) {
+; RV32I-LABEL: and32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a2, a2, 1
+; RV32I-NEXT:    beqz a2, .LBB4_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:  .LBB4_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a2, a2, 1
+; RV64I-NEXT:    beqz a2, .LBB4_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:  .LBB4_2:
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: and32:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    andi a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    and a1, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: and32:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    andi a2, a2, 1
+; RV64XTHEADCONDMOV-NEXT:    and a1, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: and32:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    andi a2, a2, 1
+; RV32ZICOND-NEXT:    and a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a0, a2
+; RV32ZICOND-NEXT:    or a0, a1, a0
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: and32:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    andi a2, a2, 1
+; RV64ZICOND-NEXT:    and a1, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a0, a0, a2
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %binop = and i32 %x, %y
+  %select_ = select i1 %c, i32 %binop, i32 %x
+  ret i32 %select_
+}
+
+
+define i32 @add32(i32 %x, i32 %y, i1 %c) {
+; RV32I-LABEL: add32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a2, 31
+; RV32I-NEXT:    srai a2, a2, 31
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: add32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    srai a2, a2, 63
+; RV64I-NEXT:    and a1, a2, a1
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: add32:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    andi a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    addw a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: add32:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    andi a2, a2, 1
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a2
+; RV64XTHEADCONDMOV-NEXT:    addw a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: add32:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    andi a2, a2, 1
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV32ZICOND-NEXT:    add a0, a0, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: add32:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    andi a2, a2, 1
+; RV64ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64ZICOND-NEXT:    addw a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %binop = add i32 %x, %y
+  %select_ = select i1 %c, i32 %binop, i32 %x
+  ret i32 %select_
+}
+
+
+define i32 @or32(i32 %x, i32 %y, i1 %c) {
+; RV32I-LABEL: or32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a2, 31
+; RV32I-NEXT:    srai a2, a2, 31
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: or32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    srai a2, a2, 63
+; RV64I-NEXT:    and a1, a2, a1
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: or32:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    andi a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: or32:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    andi a2, a2, 1
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a2
+; RV64XTHEADCONDMOV-NEXT:    or a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: or32:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    andi a2, a2, 1
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV32ZICOND-NEXT:    or a0, a0, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: or32:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    andi a2, a2, 1
+; RV64ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %binop = or i32 %x, %y
+  %select_ = select i1 %c, i32 %binop, i32 %x
+  ret i32 %select_
+}
+
+define i32 @xor32(i32 %x, i32 %y, i1 %c) {
+; RV32I-LABEL: xor32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a2, 31
+; RV32I-NEXT:    srai a2, a2, 31
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: xor32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    srai a2, a2, 63
+; RV64I-NEXT:    and a1, a2, a1
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: xor32:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    andi a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    xor a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: xor32:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    andi a2, a2, 1
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a2
+; RV64XTHEADCONDMOV-NEXT:    xor a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: xor32:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    andi a2, a2, 1
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV32ZICOND-NEXT:    xor a0, a0, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: xor32:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    andi a2, a2, 1
+; RV64ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64ZICOND-NEXT:    xor a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %binop = xor i32 %x, %y
+  %select_ = select i1 %c, i32 %binop, i32 %x
+  ret i32 %select_
+}
+
+define i64 @shl64(i64 %x, i64 %y, i1 %c) {
+; RV32I-LABEL: shl64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a4, a4, 1
+; RV32I-NEXT:    addi a5, a2, -32
+; RV32I-NEXT:    sll a3, a0, a2
+; RV32I-NEXT:    bltz a5, .LBB8_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a2, a3
+; RV32I-NEXT:    beqz a4, .LBB8_4
+; RV32I-NEXT:  .LBB8_2:
+; RV32I-NEXT:    srai a0, a5, 31
+; RV32I-NEXT:    and a0, a0, a3
+; RV32I-NEXT:    mv a1, a2
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB8_3:
+; RV32I-NEXT:    sll a6, a1, a2
+; RV32I-NEXT:    not a2, a2
+; RV32I-NEXT:    srli a7, a0, 1
+; RV32I-NEXT:    srl a2, a7, a2
+; RV32I-NEXT:    or a2, a6, a2
+; RV32I-NEXT:    bnez a4, .LBB8_2
+; RV32I-NEXT:  .LBB8_4:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: shl64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a2, a2, 1
+; RV64I-NEXT:    beqz a2, .LBB8_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:  .LBB8_2:
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: shl64:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    andi a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    sll a1, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: shl64:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    andi a2, a2, 1
+; RV64XTHEADCONDMOV-NEXT:    sll a1, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: shl64:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    andi a4, a4, 1
+; RV32ZICOND-NEXT:    sll a3, a1, a2
+; RV32ZICOND-NEXT:    not a5, a2
+; RV32ZICOND-NEXT:    srli a6, a0, 1
+; RV32ZICOND-NEXT:    srl a5, a6, a5
+; RV32ZICOND-NEXT:    or a3, a3, a5
+; RV32ZICOND-NEXT:    addi a5, a2, -32
+; RV32ZICOND-NEXT:    slti a5, a5, 0
+; RV32ZICOND-NEXT:    czero.eqz a3, a3, a5
+; RV32ZICOND-NEXT:    sll a2, a0, a2
+; RV32ZICOND-NEXT:    czero.nez a6, a2, a5
+; RV32ZICOND-NEXT:    or a3, a3, a6
+; RV32ZICOND-NEXT:    czero.eqz a2, a2, a5
+; RV32ZICOND-NEXT:    czero.nez a0, a0, a4
+; RV32ZICOND-NEXT:    czero.eqz a2, a2, a4
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.eqz a2, a3, a4
+; RV32ZICOND-NEXT:    czero.nez a1, a1, a4
+; RV32ZICOND-NEXT:    or a1, a2, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: shl64:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    andi a2, a2, 1
+; RV64ZICOND-NEXT:    sll a1, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a0, a0, a2
+; RV64ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %binop = shl i64 %x, %y
+  %select_ = select i1 %c, i64 %binop, i64 %x
+  ret i64 %select_
+}
+
+define i64 @ashr64(i64 %x, i64 %y, i1 %c) {
+; RV32I-LABEL: ashr64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a5, a4, 1
+; RV32I-NEXT:    addi a3, a2, -32
+; RV32I-NEXT:    sra a4, a1, a2
+; RV32I-NEXT:    bltz a3, .LBB9_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    srai a2, a1, 31
+; RV32I-NEXT:    mv a3, a4
+; RV32I-NEXT:    mv a4, a2
+; RV32I-NEXT:    beqz a5, .LBB9_3
+; RV32I-NEXT:    j .LBB9_4
+; RV32I-NEXT:  .LBB9_2:
+; RV32I-NEXT:    srl a3, a0, a2
+; RV32I-NEXT:    not a2, a2
+; RV32I-NEXT:    slli a6, a1, 1
+; RV32I-NEXT:    sll a2, a6, a2
+; RV32I-NEXT:    or a3, a3, a2
+; RV32I-NEXT:    bnez a5, .LBB9_4
+; RV32I-NEXT:  .LBB9_3:
+; RV32I-NEXT:    mv a3, a0
+; RV32I-NEXT:    mv a4, a1
+; RV32I-NEXT:  .LBB9_4:
+; RV32I-NEXT:    mv a0, a3
+; RV32I-NEXT:    mv a1, a4
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: ashr64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a2, a2, 1
+; RV64I-NEXT:    beqz a2, .LBB9_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:  .LBB9_2:
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: ashr64:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    andi a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    sra a1, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: ashr64:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    andi a2, a2, 1
+; RV64XTHEADCONDMOV-NEXT:    sra a1, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: ashr64:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    andi a4, a4, 1
+; RV32ZICOND-NEXT:    srl a3, a0, a2
+; RV32ZICOND-NEXT:    not a5, a2
+; RV32ZICOND-NEXT:    slli a6, a1, 1
+; RV32ZICOND-NEXT:    sll a5, a6, a5
+; RV32ZICOND-NEXT:    or a3, a3, a5
+; RV32ZICOND-NEXT:    addi a5, a2, -32
+; RV32ZICOND-NEXT:    slti a5, a5, 0
+; RV32ZICOND-NEXT:    czero.eqz a3, a3, a5
+; RV32ZICOND-NEXT:    sra a2, a1, a2
+; RV32ZICOND-NEXT:    czero.nez a6, a2, a5
+; RV32ZICOND-NEXT:    or a3, a3, a6
+; RV32ZICOND-NEXT:    srai a6, a1, 31
+; RV32ZICOND-NEXT:    czero.nez a6, a6, a5
+; RV32ZICOND-NEXT:    czero.eqz a2, a2, a5
+; RV32ZICOND-NEXT:    or a2, a2, a6
+; RV32ZICOND-NEXT:    czero.eqz a2, a2, a4
+; RV32ZICOND-NEXT:    czero.nez a1, a1, a4
+; RV32ZICOND-NEXT:    or a1, a2, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a3, a4
+; RV32ZICOND-NEXT:    czero.nez a0, a0, a4
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: ashr64:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    andi a2, a2, 1
+; RV64ZICOND-NEXT:    sra a1, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a0, a0, a2
+; RV64ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %binop = ashr i64 %x, %y
+  %select_ = select i1 %c, i64 %binop, i64 %x
+  ret i64 %select_
+}
+
+define i64 @lshr64(i64 %x, i64 %y, i1 %c) {
+; RV32I-LABEL: lshr64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a4, a4, 1
+; RV32I-NEXT:    addi a5, a2, -32
+; RV32I-NEXT:    srl a3, a1, a2
+; RV32I-NEXT:    bltz a5, .LBB10_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a2, a3
+; RV32I-NEXT:    beqz a4, .LBB10_4
+; RV32I-NEXT:  .LBB10_2:
+; RV32I-NEXT:    srai a1, a5, 31
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB10_3:
+; RV32I-NEXT:    srl a6, a0, a2
+; RV32I-NEXT:    not a2, a2
+; RV32I-NEXT:    slli a7, a1, 1
+; RV32I-NEXT:    sll a2, a7, a2
+; RV32I-NEXT:    or a2, a6, a2
+; RV32I-NEXT:    bnez a4, .LBB10_2
+; RV32I-NEXT:  .LBB10_4:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: lshr64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a2, a2, 1
+; RV64I-NEXT:    beqz a2, .LBB10_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:  .LBB10_2:
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: lshr64:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    andi a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    srl a1, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: lshr64:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    andi a2, a2, 1
+; RV64XTHEADCONDMOV-NEXT:    srl a1, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: lshr64:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    andi a4, a4, 1
+; RV32ZICOND-NEXT:    srl a3, a0, a2
+; RV32ZICOND-NEXT:    not a5, a2
+; RV32ZICOND-NEXT:    slli a6, a1, 1
+; RV32ZICOND-NEXT:    sll a5, a6, a5
+; RV32ZICOND-NEXT:    or a3, a3, a5
+; RV32ZICOND-NEXT:    addi a5, a2, -32
+; RV32ZICOND-NEXT:    slti a5, a5, 0
+; RV32ZICOND-NEXT:    czero.eqz a3, a3, a5
+; RV32ZICOND-NEXT:    srl a2, a1, a2
+; RV32ZICOND-NEXT:    czero.nez a6, a2, a5
+; RV32ZICOND-NEXT:    or a3, a3, a6
+; RV32ZICOND-NEXT:    czero.eqz a2, a2, a5
+; RV32ZICOND-NEXT:    czero.nez a1, a1, a4
+; RV32ZICOND-NEXT:    czero.eqz a2, a2, a4
+; RV32ZICOND-NEXT:    or a1, a2, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a3, a4
+; RV32ZICOND-NEXT:    czero.nez a0, a0, a4
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: lshr64:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    andi a2, a2, 1
+; RV64ZICOND-NEXT:    srl a1, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a0, a0, a2
+; RV64ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %binop = lshr i64 %x, %y
+  %select_ = select i1 %c, i64 %binop, i64 %x
+  ret i64 %select_
+}
+
+define i64 @sub64(i64 %x, i64 %y, i1 %c) {
+; RV32I-LABEL: sub64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a4, a4, 31
+; RV32I-NEXT:    srai a4, a4, 31
+; RV32I-NEXT:    and a2, a4, a2
+; RV32I-NEXT:    sltu a5, a0, a2
+; RV32I-NEXT:    and a3, a4, a3
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a5
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sub64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    srai a2, a2, 63
+; RV64I-NEXT:    and a1, a2, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: sub64:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    andi a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    sub a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: sub64:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    andi a2, a2, 1
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a2
+; RV64XTHEADCONDMOV-NEXT:    sub a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: sub64:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    andi a4, a4, 1
+; RV32ZICOND-NEXT:    czero.eqz a2, a2, a4
+; RV32ZICOND-NEXT:    sltu a5, a0, a2
+; RV32ZICOND-NEXT:    czero.eqz a3, a3, a4
+; RV32ZICOND-NEXT:    sub a1, a1, a3
+; RV32ZICOND-NEXT:    sub a1, a1, a5
+; RV32ZICOND-NEXT:    sub a0, a0, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: sub64:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    andi a2, a2, 1
+; RV64ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64ZICOND-NEXT:    sub a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %binop = sub i64 %x, %y
+  %select_ = select i1 %c, i64 %binop, i64 %x
+  ret i64 %select_
+}
+
+define i64 @and64(i64 %x, i64 %y, i1 %c) {
+; RV32I-LABEL: and64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a4, a4, 1
+; RV32I-NEXT:    beqz a4, .LBB12_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:  .LBB12_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a2, a2, 1
+; RV64I-NEXT:    beqz a2, .LBB12_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:  .LBB12_2:
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: and64:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    andi a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    and a1, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: and64:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    andi a2, a2, 1
+; RV64XTHEADCONDMOV-NEXT:    and a1, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: and64:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    andi a4, a4, 1
+; RV32ZICOND-NEXT:    and a3, a1, a3
+; RV32ZICOND-NEXT:    and a2, a0, a2
+; RV32ZICOND-NEXT:    czero.nez a0, a0, a4
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a1, a1, a4
+; RV32ZICOND-NEXT:    or a1, a3, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: and64:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    andi a2, a2, 1
+; RV64ZICOND-NEXT:    and a1, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a0, a0, a2
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %binop = and i64 %x, %y
+  %select_ = select i1 %c, i64 %binop, i64 %x
+  ret i64 %select_
+}
+
+
+define i64 @add64(i64 %x, i64 %y, i1 %c) {
+; RV32I-LABEL: add64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a4, a4, 31
+; RV32I-NEXT:    srai a4, a4, 31
+; RV32I-NEXT:    and a3, a4, a3
+; RV32I-NEXT:    add a1, a1, a3
+; RV32I-NEXT:    and a2, a4, a2
+; RV32I-NEXT:    add a2, a0, a2
+; RV32I-NEXT:    sltu a0, a2, a0
+; RV32I-NEXT:    add a1, a1, a0
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: add64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    srai a2, a2, 63
+; RV64I-NEXT:    and a1, a2, a1
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: add64:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    andi a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    add a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: add64:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    andi a2, a2, 1
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a2
+; RV64XTHEADCONDMOV-NEXT:    add a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: add64:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    andi a4, a4, 1
+; RV32ZICOND-NEXT:    czero.eqz a3, a3, a4
+; RV32ZICOND-NEXT:    add a1, a1, a3
+; RV32ZICOND-NEXT:    czero.eqz a2, a2, a4
+; RV32ZICOND-NEXT:    add a2, a0, a2
+; RV32ZICOND-NEXT:    sltu a0, a2, a0
+; RV32ZICOND-NEXT:    add a1, a1, a0
+; RV32ZICOND-NEXT:    mv a0, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: add64:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    andi a2, a2, 1
+; RV64ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64ZICOND-NEXT:    add a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %binop = add i64 %x, %y
+  %select_ = select i1 %c, i64 %binop, i64 %x
+  ret i64 %select_
+}
+
+
+define i64 @or64(i64 %x, i64 %y, i1 %c) {
+; RV32I-LABEL: or64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a4, a4, 31
+; RV32I-NEXT:    srai a4, a4, 31
+; RV32I-NEXT:    and a2, a4, a2
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    and a3, a4, a3
+; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: or64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    srai a2, a2, 63
+; RV64I-NEXT:    and a1, a2, a1
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: or64:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    andi a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: or64:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    andi a2, a2, 1
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a2
+; RV64XTHEADCONDMOV-NEXT:    or a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: or64:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    andi a4, a4, 1
+; RV32ZICOND-NEXT:    czero.eqz a2, a2, a4
+; RV32ZICOND-NEXT:    or a0, a0, a2
+; RV32ZICOND-NEXT:    czero.eqz a2, a3, a4
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: or64:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    andi a2, a2, 1
+; RV64ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %binop = or i64 %x, %y
+  %select_ = select i1 %c, i64 %binop, i64 %x
+  ret i64 %select_
+}
+
+define i64 @xor64(i64 %x, i64 %y, i1 %c) {
+; RV32I-LABEL: xor64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a4, a4, 31
+; RV32I-NEXT:    srai a4, a4, 31
+; RV32I-NEXT:    and a2, a4, a2
+; RV32I-NEXT:    xor a0, a0, a2
+; RV32I-NEXT:    and a3, a4, a3
+; RV32I-NEXT:    xor a1, a1, a3
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: xor64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    srai a2, a2, 63
+; RV64I-NEXT:    and a1, a2, a1
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: xor64:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    andi a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    xor a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: xor64:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    andi a2, a2, 1
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a2
+; RV64XTHEADCONDMOV-NEXT:    xor a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: xor64:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    andi a4, a4, 1
+; RV32ZICOND-NEXT:    czero.eqz a2, a2, a4
+; RV32ZICOND-NEXT:    xor a0, a0, a2
+; RV32ZICOND-NEXT:    czero.eqz a2, a3, a4
+; RV32ZICOND-NEXT:    xor a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: xor64:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    andi a2, a2, 1
+; RV64ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64ZICOND-NEXT:    xor a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %binop = xor i64 %x, %y
+  %select_ = select i1 %c, i64 %binop, i64 %x
+  ret i64 %select_
+}
