[llvm] [LoongArch][NFC] Pre-commit for conditional branch optimization (PR #151788)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 1 20:57:02 PDT 2025
https://github.com/heiher created https://github.com/llvm/llvm-project/pull/151788
None
>From fd0e9f6d7a3407e92d3e479230536b2a1ea8e811 Mon Sep 17 00:00:00 2001
From: WANG Rui <wangrui at loongson.cn>
Date: Sat, 2 Aug 2025 11:38:01 +0800
Subject: [PATCH] [LoongArch][NFC] Pre-commit for conditional branch
optimization
---
llvm/test/CodeGen/LoongArch/bittest.ll | 3366 +++++++++++++++++++
llvm/test/CodeGen/LoongArch/select-const.ll | 25 +
2 files changed, 3391 insertions(+)
create mode 100644 llvm/test/CodeGen/LoongArch/bittest.ll
diff --git a/llvm/test/CodeGen/LoongArch/bittest.ll b/llvm/test/CodeGen/LoongArch/bittest.ll
new file mode 100644
index 0000000000000..210e4edbb38ff
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/bittest.ll
@@ -0,0 +1,3366 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 -mattr=+d < %s | FileCheck %s --check-prefixes=CHECK,LA32
+; RUN: llc --mtriple=loongarch64 -mattr=+d < %s | FileCheck %s --check-prefixes=CHECK,LA64
+
+define signext i32 @bittest_7_i32(i32 signext %a) nounwind {
+; LA32-LABEL: bittest_7_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a0, $zero
+; LA32-NEXT: srli.w $a0, $a0, 7
+; LA32-NEXT: andi $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_7_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 7, 7
+; LA64-NEXT: ret
+ %shr = lshr i32 %a, 7
+ %not = xor i32 %shr, -1
+ %and = and i32 %not, 1
+ ret i32 %and
+}
+
+define signext i32 @bittest_10_i32(i32 signext %a) nounwind {
+; LA32-LABEL: bittest_10_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a0, $zero
+; LA32-NEXT: srli.w $a0, $a0, 10
+; LA32-NEXT: andi $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_10_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 10, 10
+; LA64-NEXT: ret
+ %shr = lshr i32 %a, 10
+ %not = xor i32 %shr, -1
+ %and = and i32 %not, 1
+ ret i32 %and
+}
+
+define signext i32 @bittest_11_i32(i32 signext %a) nounwind {
+; LA32-LABEL: bittest_11_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a0, $zero
+; LA32-NEXT: srli.w $a0, $a0, 11
+; LA32-NEXT: andi $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_11_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 11, 11
+; LA64-NEXT: ret
+ %shr = lshr i32 %a, 11
+ %not = xor i32 %shr, -1
+ %and = and i32 %not, 1
+ ret i32 %and
+}
+
+define signext i32 @bittest_31_i32(i32 signext %a) nounwind {
+; LA32-LABEL: bittest_31_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a0, $zero
+; LA32-NEXT: srli.w $a0, $a0, 31
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_31_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 31
+; LA64-NEXT: ret
+ %shr = lshr i32 %a, 31
+ %not = xor i32 %shr, -1
+ %and = and i32 %not, 1
+ ret i32 %and
+}
+
+define i64 @bittest_7_i64(i64 %a) nounwind {
+; LA32-LABEL: bittest_7_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a0, $zero
+; LA32-NEXT: srli.w $a0, $a0, 7
+; LA32-NEXT: andi $a0, $a0, 1
+; LA32-NEXT: move $a1, $zero
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_7_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 7, 7
+; LA64-NEXT: ret
+ %shr = lshr i64 %a, 7
+ %not = xor i64 %shr, -1
+ %and = and i64 %not, 1
+ ret i64 %and
+}
+
+define i64 @bittest_10_i64(i64 %a) nounwind {
+; LA32-LABEL: bittest_10_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a0, $zero
+; LA32-NEXT: srli.w $a0, $a0, 10
+; LA32-NEXT: andi $a0, $a0, 1
+; LA32-NEXT: move $a1, $zero
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_10_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 10, 10
+; LA64-NEXT: ret
+ %shr = lshr i64 %a, 10
+ %not = xor i64 %shr, -1
+ %and = and i64 %not, 1
+ ret i64 %and
+}
+
+define i64 @bittest_11_i64(i64 %a) nounwind {
+; LA32-LABEL: bittest_11_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a0, $zero
+; LA32-NEXT: srli.w $a0, $a0, 11
+; LA32-NEXT: andi $a0, $a0, 1
+; LA32-NEXT: move $a1, $zero
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_11_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 11, 11
+; LA64-NEXT: ret
+ %shr = lshr i64 %a, 11
+ %not = xor i64 %shr, -1
+ %and = and i64 %not, 1
+ ret i64 %and
+}
+
+define i64 @bittest_31_i64(i64 %a) nounwind {
+; LA32-LABEL: bittest_31_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a0, $zero
+; LA32-NEXT: srli.w $a0, $a0, 31
+; LA32-NEXT: move $a1, $zero
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_31_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 31
+; LA64-NEXT: ret
+ %shr = lshr i64 %a, 31
+ %not = xor i64 %shr, -1
+ %and = and i64 %not, 1
+ ret i64 %and
+}
+
+define i64 @bittest_32_i64(i64 %a) nounwind {
+; LA32-LABEL: bittest_32_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: ori $a0, $zero, 1
+; LA32-NEXT: andn $a0, $a0, $a1
+; LA32-NEXT: move $a1, $zero
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_32_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 32, 32
+; LA64-NEXT: ret
+ %shr = lshr i64 %a, 32
+ %not = xor i64 %shr, -1
+ %and = and i64 %not, 1
+ ret i64 %and
+}
+
+define i64 @bittest_63_i64(i64 %a) nounwind {
+; LA32-LABEL: bittest_63_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a1, $zero
+; LA32-NEXT: srli.w $a0, $a0, 31
+; LA32-NEXT: move $a1, $zero
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_63_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: srli.d $a0, $a0, 63
+; LA64-NEXT: ret
+ %shr = lshr i64 %a, 63
+ %not = xor i64 %shr, -1
+ %and = and i64 %not, 1
+ ret i64 %and
+}
+
+define i1 @bittest_constant_by_var_shr_i32(i32 signext %b) nounwind {
+; CHECK-LABEL: bittest_constant_by_var_shr_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lu12i.w $a1, 301408
+; CHECK-NEXT: ori $a1, $a1, 722
+; CHECK-NEXT: srl.w $a0, $a1, $a0
+; CHECK-NEXT: andi $a0, $a0, 1
+; CHECK-NEXT: ret
+ %shl = lshr i32 1234567890, %b
+ %and = and i32 %shl, 1
+ %cmp = icmp ne i32 %and, 0
+ ret i1 %cmp
+}
+
+define i1 @bittest_constant_by_var_shl_i32(i32 signext %b) nounwind {
+; CHECK-LABEL: bittest_constant_by_var_shl_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ori $a1, $zero, 1
+; CHECK-NEXT: sll.w $a0, $a1, $a0
+; CHECK-NEXT: lu12i.w $a1, 301408
+; CHECK-NEXT: ori $a1, $a1, 722
+; CHECK-NEXT: and $a0, $a0, $a1
+; CHECK-NEXT: sltu $a0, $zero, $a0
+; CHECK-NEXT: ret
+ %shl = shl i32 1, %b
+ %and = and i32 %shl, 1234567890
+ %cmp = icmp ne i32 %and, 0
+ ret i1 %cmp
+}
+
+define i1 @bittest_constant_by_var_shr_i64(i64 %b) nounwind {
+; LA32-LABEL: bittest_constant_by_var_shr_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 301408
+; LA32-NEXT: ori $a1, $a1, 722
+; LA32-NEXT: srl.w $a1, $a1, $a0
+; LA32-NEXT: addi.w $a0, $a0, -32
+; LA32-NEXT: slti $a0, $a0, 0
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_constant_by_var_shr_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a1, 301408
+; LA64-NEXT: ori $a1, $a1, 722
+; LA64-NEXT: srl.d $a0, $a1, $a0
+; LA64-NEXT: andi $a0, $a0, 1
+; LA64-NEXT: ret
+ %shl = lshr i64 1234567890, %b
+ %and = and i64 %shl, 1
+ %cmp = icmp ne i64 %and, 0
+ ret i1 %cmp
+}
+
+define i1 @bittest_constant_by_var_shl_i64(i64 %b) nounwind {
+; LA32-LABEL: bittest_constant_by_var_shl_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $a1, $a0, -32
+; LA32-NEXT: slti $a1, $a1, 0
+; LA32-NEXT: sub.w $a1, $zero, $a1
+; LA32-NEXT: ori $a2, $zero, 1
+; LA32-NEXT: sll.w $a0, $a2, $a0
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: lu12i.w $a1, 301408
+; LA32-NEXT: ori $a1, $a1, 722
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: sltu $a0, $zero, $a0
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_constant_by_var_shl_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: ori $a1, $zero, 1
+; LA64-NEXT: sll.d $a0, $a1, $a0
+; LA64-NEXT: lu12i.w $a1, 301408
+; LA64-NEXT: ori $a1, $a1, 722
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: ret
+ %shl = shl i64 1, %b
+ %and = and i64 %shl, 1234567890
+ %cmp = icmp ne i64 %and, 0
+ ret i1 %cmp
+}
+
+define void @bittest_switch(i32 signext %0) {
+; LA32-LABEL: bittest_switch:
+; LA32: # %bb.0:
+; LA32-NEXT: ori $a1, $zero, 31
+; LA32-NEXT: bltu $a1, $a0, .LBB14_3
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ori $a1, $zero, 1
+; LA32-NEXT: sll.w $a0, $a1, $a0
+; LA32-NEXT: lu12i.w $a1, -524285
+; LA32-NEXT: ori $a1, $a1, 768
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB14_3
+; LA32-NEXT: # %bb.2:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB14_3:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_switch:
+; LA64: # %bb.0:
+; LA64-NEXT: ori $a1, $zero, 31
+; LA64-NEXT: bltu $a1, $a0, .LBB14_3
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ori $a1, $zero, 1
+; LA64-NEXT: sll.d $a0, $a1, $a0
+; LA64-NEXT: lu12i.w $a1, -524285
+; LA64-NEXT: ori $a1, $a1, 768
+; LA64-NEXT: lu32i.d $a1, 0
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: beqz $a0, .LBB14_3
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB14_3:
+; LA64-NEXT: ret
+ switch i32 %0, label %3 [
+ i32 8, label %2
+ i32 9, label %2
+ i32 12, label %2
+ i32 13, label %2
+ i32 31, label %2
+ ]
+
+2:
+ tail call void @bar()
+ br label %3
+
+3:
+ ret void
+}
+
+declare void @bar()
+
+define signext i32 @bit_10_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_10_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a3, $a0, 1024
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: beq $a3, $zero, .LBB15_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB15_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1024
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 1024
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_10_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_10_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1024
+; LA32-NEXT: srli.w $a3, $a0, 10
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB16_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB16_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 10, 10
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 1024
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_11_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_11_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a3, $a0, 20
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bgez $a3, .LBB17_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB17_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2048
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 2048
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_11_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_11_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2048
+; LA32-NEXT: srli.w $a3, $a0, 11
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB18_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB18_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 11, 11
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 2048
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_20_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_20_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a3, $a0, 11
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bgez $a3, .LBB19_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB19_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_20_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a3, 256
+; LA64-NEXT: and $a0, $a0, $a3
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 1048576
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_20_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_20_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a3, 256
+; LA32-NEXT: and $a0, $a0, $a3
+; LA32-NEXT: srli.w $a3, $a0, 20
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB20_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB20_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_20_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 20, 20
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 1048576
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_31_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_31_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: bgez $a0, .LBB21_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a1, $a2
+; LA32-NEXT: .LBB21_2:
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrins.d $a0, $zero, 30, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 2147483648
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_31_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_31_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: srli.w $a3, $a0, 31
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB22_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB22_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrins.d $a0, $zero, 30, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 2147483648
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define i64 @bit_10_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_10_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a6, $a0, 1024
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB23_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB23_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1024
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 1024
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_10_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_10_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: srli.w $a0, $a0, 10
+; LA32-NEXT: andi $a6, $a0, 1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB24_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB24_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 10, 10
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 1024
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_11_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_11_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a0, 20
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bgez $a6, .LBB25_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB25_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2048
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 2048
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_11_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_11_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: srli.w $a0, $a0, 11
+; LA32-NEXT: andi $a6, $a0, 1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB26_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB26_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 11, 11
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 2048
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_20_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_20_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a0, 11
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bgez $a6, .LBB27_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB27_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_20_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a3, 256
+; LA64-NEXT: and $a0, $a0, $a3
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 1048576
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_20_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_20_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: srli.w $a0, $a0, 20
+; LA32-NEXT: andi $a6, $a0, 1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB28_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB28_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_20_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 20, 20
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 1048576
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_31_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_31_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: bgez $a0, .LBB29_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a2, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB29_2:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a3, -524288
+; LA64-NEXT: lu32i.d $a3, 0
+; LA64-NEXT: and $a0, $a0, $a3
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 2147483648
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_31_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_31_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: srli.w $a6, $a0, 31
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB30_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB30_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 31
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 2147483648
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_32_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_32_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a6, $a1, 1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB31_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB31_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: ori $a3, $zero, 0
+; LA64-NEXT: lu32i.d $a3, 1
+; LA64-NEXT: and $a0, $a0, $a3
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 4294967296
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_32_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_32_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a6, $a1, 1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB32_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB32_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 32, 32
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 4294967296
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_55_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_55_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a1, 8
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bgez $a6, .LBB33_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB33_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_55_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu52i.d $a3, $zero, 8
+; LA64-NEXT: and $a0, $a0, $a3
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 36028797018963968
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_55_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_55_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: srli.w $a0, $a1, 23
+; LA32-NEXT: andi $a6, $a0, 1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB34_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB34_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_55_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 55, 55
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 36028797018963968
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_63_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_63_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bgez $a1, .LBB35_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a3, $a5
+; LA32-NEXT: .LBB35_2:
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_63_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrins.d $a0, $zero, 62, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 9223372036854775808
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_63_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_63_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: srli.w $a6, $a1, 31
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB36_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB36_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_63_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: srli.d $a0, $a0, 63
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 9223372036854775808
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define void @bit_10_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_10_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1024
+; LA32-NEXT: bne $a0, $zero, .LBB37_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB37_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1024
+; LA64-NEXT: bnez $a0, .LBB37_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB37_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 1024
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_10_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_10_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1024
+; LA32-NEXT: beq $a0, $zero, .LBB38_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB38_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1024
+; LA64-NEXT: beqz $a0, .LBB38_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB38_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 1024
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_11_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_11_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2048
+; LA32-NEXT: bne $a0, $zero, .LBB39_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB39_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2048
+; LA64-NEXT: bnez $a0, .LBB39_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB39_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 2048
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_11_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_11_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2048
+; LA32-NEXT: beq $a0, $zero, .LBB40_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB40_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2048
+; LA64-NEXT: beqz $a0, .LBB40_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB40_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 2048
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_24_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_24_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 4096
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: bne $a0, $zero, .LBB41_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB41_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_24_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a1, 4096
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: bnez $a0, .LBB41_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB41_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 16777216
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_24_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_24_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 4096
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB42_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB42_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_24_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a1, 4096
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: beqz $a0, .LBB42_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB42_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 16777216
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_31_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_31_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, -524288
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: bne $a0, $zero, .LBB43_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB43_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrins.d $a0, $zero, 30, 0
+; LA64-NEXT: bnez $a0, .LBB43_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB43_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 2147483648
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_31_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_31_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, -524288
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB44_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB44_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrins.d $a0, $zero, 30, 0
+; LA64-NEXT: beqz $a0, .LBB44_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB44_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 2147483648
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_10_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_10_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1024
+; LA32-NEXT: bne $a0, $zero, .LBB45_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB45_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1024
+; LA64-NEXT: bnez $a0, .LBB45_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB45_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 1024
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_10_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_10_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1024
+; LA32-NEXT: beq $a0, $zero, .LBB46_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB46_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1024
+; LA64-NEXT: beqz $a0, .LBB46_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB46_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 1024
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_11_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_11_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2048
+; LA32-NEXT: bne $a0, $zero, .LBB47_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB47_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2048
+; LA64-NEXT: bnez $a0, .LBB47_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB47_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 2048
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_11_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_11_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2048
+; LA32-NEXT: beq $a0, $zero, .LBB48_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB48_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2048
+; LA64-NEXT: beqz $a0, .LBB48_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB48_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 2048
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_24_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_24_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 4096
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: bne $a0, $zero, .LBB49_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB49_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_24_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a1, 4096
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: bnez $a0, .LBB49_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB49_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 16777216
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_24_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_24_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 4096
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB50_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB50_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_24_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a1, 4096
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: beqz $a0, .LBB50_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB50_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 16777216
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_31_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_31_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, -524288
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: bne $a0, $zero, .LBB51_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB51_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a1, -524288
+; LA64-NEXT: lu32i.d $a1, 0
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: bnez $a0, .LBB51_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB51_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 2147483648
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_31_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_31_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, -524288
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB52_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB52_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a1, -524288
+; LA64-NEXT: lu32i.d $a1, 0
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: beqz $a0, .LBB52_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB52_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 2147483648
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_32_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_32_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a1, 1
+; LA32-NEXT: bne $a0, $zero, .LBB53_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB53_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: ori $a1, $zero, 0
+; LA64-NEXT: lu32i.d $a1, 1
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: bnez $a0, .LBB53_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB53_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 4294967296
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_32_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_32_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a1, 1
+; LA32-NEXT: beq $a0, $zero, .LBB54_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB54_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: ori $a1, $zero, 0
+; LA64-NEXT: lu32i.d $a1, 1
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: beqz $a0, .LBB54_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB54_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 4294967296
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_62_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_62_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a0, 262144
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: bne $a0, $zero, .LBB55_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB55_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_62_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu52i.d $a1, $zero, 1024
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: bnez $a0, .LBB55_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB55_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 4611686018427387904
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_62_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_62_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a0, 262144
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: beq $a0, $zero, .LBB56_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB56_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_62_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu52i.d $a1, $zero, 1024
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: beqz $a0, .LBB56_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB56_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 4611686018427387904
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_63_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_63_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a0, -524288
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: bne $a0, $zero, .LBB57_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB57_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_63_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrins.d $a0, $zero, 62, 0
+; LA64-NEXT: bnez $a0, .LBB57_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB57_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 9223372036854775808
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_63_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_63_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a0, -524288
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: beq $a0, $zero, .LBB58_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB58_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_63_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrins.d $a0, $zero, 62, 0
+; LA64-NEXT: beqz $a0, .LBB58_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB58_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 9223372036854775808
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+; Select on "low 10 bits of i32 all zero". LA32 lowers to a compare/branch
+; diamond; LA64 uses the branchless sltui/masknez/maskeqz/or select idiom.
+define signext i32 @bit_10_1_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_10_1_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a3, $a0, 1023
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: beq $a3, $zero, .LBB59_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB59_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_1_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1023
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 1023
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_10_1_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_10_1_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a3, $a0, 1023
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB60_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB60_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_1_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1023
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 1023
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_11_1_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_11_1_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a3, $a0, 2047
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: beq $a3, $zero, .LBB61_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB61_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_1_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2047
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 2047
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_11_1_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_11_1_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a3, $a0, 2047
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB62_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB62_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_1_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2047
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 2047
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_16_1_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_16_1_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a3, $a0, 16
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: beq $a3, $zero, .LBB63_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB63_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_16_1_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 65535
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_16_1_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_16_1_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a3, $a0, 16
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB64_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB64_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_16_1_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 65535
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_20_1_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_20_1_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a3, $a0, 12
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: beq $a3, $zero, .LBB65_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB65_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_20_1_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 19, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 1048575
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_20_1_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_20_1_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a3, $a0, 12
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB66_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB66_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_20_1_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 19, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 1048575
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_31_1_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_31_1_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a3, $a0, 1
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: beq $a3, $zero, .LBB67_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB67_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_1_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 30, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 2147483647
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_31_1_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_31_1_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a3, $a0, 1
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB68_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB68_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_1_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 30, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 2147483647
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_32_1_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_32_1_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: beq $a0, $zero, .LBB69_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a1, $a2
+; LA32-NEXT: .LBB69_2:
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_1_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 4294967295
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_32_1_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_32_1_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: bne $a0, $zero, .LBB70_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a1, $a2
+; LA32-NEXT: .LBB70_2:
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_1_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 4294967295
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+; i64 variant of the 10-bit-mask select. On LA32 the i64 operands are split
+; across register pairs ($a0/$a1 etc.), so only the low word is tested and
+; both halves of the result are moved; LA64 again uses the branchless idiom.
+define i64 @bit_10_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_10_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a6, $a0, 1023
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB71_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB71_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1023
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 1023
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_10_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_10_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a6, $a0, 1023
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB72_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB72_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1023
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 1023
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_11_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_11_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a6, $a0, 2047
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB73_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB73_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2047
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 2047
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_11_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_11_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a6, $a0, 2047
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB74_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB74_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2047
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 2047
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_16_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_16_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a0, 16
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB75_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB75_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_16_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 65535
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+; Select on "low 16 bits of i64 nonzero" — the nz counterpart of
+; bit_16_1_z_select_i64 above.
+; NOTE(review): the original test body masked with 4294967295 (32 bits,
+; duplicating bit_32_1_nz_select_i64) despite the bit_16 name — a copy-paste
+; slip. Fixed to mask 65535; CHECK lines regenerated to match
+; (run update_llc_test_checks.py to confirm).
+define i64 @bit_16_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_16_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a0, 16
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB76_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB76_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_16_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 65535
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_20_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_20_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a0, 12
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB77_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB77_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_20_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 19, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 1048575
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_20_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_20_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a0, 12
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB78_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB78_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_20_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 19, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 1048575
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_31_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_31_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a0, 1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB79_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB79_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 30, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 2147483647
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_31_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_31_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a0, 1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB80_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB80_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 30, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 2147483647
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_32_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_32_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: beq $a0, $zero, .LBB81_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a2, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB81_2:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 4294967295
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_32_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_32_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: bne $a0, $zero, .LBB82_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a2, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB82_2:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 4294967295
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_55_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_55_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a6, 2047
+; LA32-NEXT: ori $a6, $a6, 4095
+; LA32-NEXT: and $a1, $a1, $a6
+; LA32-NEXT: or $a6, $a0, $a1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB83_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB83_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_55_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 54, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 36028797018963967
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_55_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_55_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a6, 2047
+; LA32-NEXT: ori $a6, $a6, 4095
+; LA32-NEXT: and $a1, $a1, $a6
+; LA32-NEXT: or $a6, $a0, $a1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB84_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB84_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_55_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 54, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 36028797018963967
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_63_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_63_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a6, 524287
+; LA32-NEXT: ori $a6, $a6, 4095
+; LA32-NEXT: and $a1, $a1, $a6
+; LA32-NEXT: or $a6, $a0, $a1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB85_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB85_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_63_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 62, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 9223372036854775807
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_63_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_63_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a6, 524287
+; LA32-NEXT: ori $a6, $a6, 4095
+; LA32-NEXT: and $a1, $a1, $a6
+; LA32-NEXT: or $a6, $a0, $a1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB86_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB86_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_63_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 62, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 9223372036854775807
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_64_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_64_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: or $a6, $a0, $a1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB87_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB87_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_64_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 18446744073709551615
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_64_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_64_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: or $a6, $a0, $a1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB88_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB88_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_64_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 18446744073709551615
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+; Multi-bit mask (bits 9..0) branch on zero for i32: `andi` with the full
+; 10-bit mask, then branch; the call to @bar is tail-lowered (`b bar` /
+; `pcaddu18i`+`jr`) on the fall-through-to-call path.
+define void @bit_10_1_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_10_1_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1023
+; LA32-NEXT: beq $a0, $zero, .LBB89_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB89_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_10_1_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1023
+; LA64-NEXT: beqz $a0, .LBB89_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB89_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i32 %0, 1023
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_10_1_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_10_1_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1023
+; LA32-NEXT: beq $a0, $zero, .LBB90_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB90_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_1_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1023
+; LA64-NEXT: beqz $a0, .LBB90_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB90_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 1023
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_11_1_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_11_1_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2047
+; LA32-NEXT: beq $a0, $zero, .LBB91_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB91_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_11_1_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2047
+; LA64-NEXT: beqz $a0, .LBB91_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB91_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i32 %0, 2047
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_11_1_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_11_1_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2047
+; LA32-NEXT: beq $a0, $zero, .LBB92_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB92_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_1_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2047
+; LA64-NEXT: beqz $a0, .LBB92_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB92_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 2047
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_16_1_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_16_1_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 15
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB93_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB93_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_16_1_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT: beqz $a0, .LBB93_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB93_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i32 %0, 65535
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_16_1_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_16_1_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 15
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB94_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB94_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_16_1_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT: beqz $a0, .LBB94_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB94_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 65535
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_24_1_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_24_1_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 4095
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB95_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB95_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_24_1_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 23, 0
+; LA64-NEXT: beqz $a0, .LBB95_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB95_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i32 %0, 16777215
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_24_1_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_24_1_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 4095
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB96_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB96_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_24_1_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 23, 0
+; LA64-NEXT: beqz $a0, .LBB96_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB96_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 16777215
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_31_1_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_31_1_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 524287
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB97_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB97_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_31_1_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 30, 0
+; LA64-NEXT: beqz $a0, .LBB97_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB97_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i32 %0, 2147483647
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_31_1_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_31_1_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 524287
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB98_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB98_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_1_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 30, 0
+; LA64-NEXT: beqz $a0, .LBB98_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB98_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 2147483647
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_32_1_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_32_1_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: beq $a0, $zero, .LBB99_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB99_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_32_1_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: beqz $a0, .LBB99_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB99_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i32 %0, 4294967295
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_32_1_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_32_1_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: beq $a0, $zero, .LBB100_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB100_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_1_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: beqz $a0, .LBB100_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB100_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 4294967295
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_10_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_10_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1023
+; LA32-NEXT: beq $a0, $zero, .LBB101_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB101_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_10_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1023
+; LA64-NEXT: beqz $a0, .LBB101_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB101_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 1023
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_10_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_10_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1023
+; LA32-NEXT: beq $a0, $zero, .LBB102_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB102_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1023
+; LA64-NEXT: beqz $a0, .LBB102_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB102_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 1023
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_11_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_11_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2047
+; LA32-NEXT: beq $a0, $zero, .LBB103_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB103_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_11_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2047
+; LA64-NEXT: beqz $a0, .LBB103_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB103_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 2047
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_11_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_11_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2047
+; LA32-NEXT: beq $a0, $zero, .LBB104_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB104_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2047
+; LA64-NEXT: beqz $a0, .LBB104_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB104_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 2047
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_16_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_16_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 15
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB105_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB105_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_16_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT: beqz $a0, .LBB105_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB105_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 65535
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_16_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_16_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 15
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB106_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB106_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_16_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT: beqz $a0, .LBB106_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB106_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 65535
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_24_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_24_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 4095
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB107_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB107_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_24_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 23, 0
+; LA64-NEXT: beqz $a0, .LBB107_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB107_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 16777215
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_24_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_24_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 4095
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB108_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB108_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_24_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 23, 0
+; LA64-NEXT: beqz $a0, .LBB108_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB108_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 16777215
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_31_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_31_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 524287
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB109_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB109_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_31_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 30, 0
+; LA64-NEXT: beqz $a0, .LBB109_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB109_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 2147483647
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_31_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_31_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 524287
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB110_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB110_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 30, 0
+; LA64-NEXT: beqz $a0, .LBB110_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB110_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 2147483647
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_32_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_32_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: beq $a0, $zero, .LBB111_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB111_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_32_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT: beqz $a0, .LBB111_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB111_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 4294967295
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_32_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_32_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: beq $a0, $zero, .LBB112_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB112_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT: beqz $a0, .LBB112_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB112_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 4294967295
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_62_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_62_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a2, 262143
+; LA32-NEXT: ori $a2, $a2, 4095
+; LA32-NEXT: and $a1, $a1, $a2
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB113_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB113_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_62_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 61, 0
+; LA64-NEXT: beqz $a0, .LBB113_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB113_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 4611686018427387903
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_62_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_62_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a2, 262143
+; LA32-NEXT: ori $a2, $a2, 4095
+; LA32-NEXT: and $a1, $a1, $a2
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB114_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB114_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_62_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 61, 0
+; LA64-NEXT: beqz $a0, .LBB114_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB114_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 4611686018427387903
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_63_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_63_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a2, 524287
+; LA32-NEXT: ori $a2, $a2, 4095
+; LA32-NEXT: and $a1, $a1, $a2
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB115_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB115_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_63_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 62, 0
+; LA64-NEXT: beqz $a0, .LBB115_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB115_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 9223372036854775807
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_63_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_63_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a2, 524287
+; LA32-NEXT: ori $a2, $a2, 4095
+; LA32-NEXT: and $a1, $a1, $a2
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB116_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB116_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_63_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 62, 0
+; LA64-NEXT: beqz $a0, .LBB116_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB116_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 9223372036854775807
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_64_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_64_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB117_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB117_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_64_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: beqz $a0, .LBB117_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB117_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 18446744073709551615
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_64_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_64_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB118_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB118_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_64_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: beqz $a0, .LBB118_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB118_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 18446744073709551615
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
diff --git a/llvm/test/CodeGen/LoongArch/select-const.ll b/llvm/test/CodeGen/LoongArch/select-const.ll
index e9506b3a83592..00a64b8664801 100644
--- a/llvm/test/CodeGen/LoongArch/select-const.ll
+++ b/llvm/test/CodeGen/LoongArch/select-const.ll
@@ -301,3 +301,28 @@ define i32 @select_ne_10001_10002(i32 signext %a, i32 signext %b) {
%2 = select i1 %1, i32 10001, i32 10002
ret i32 %2
}
+
+define i32 @select_slt_zero_constant1_constant2(i32 signext %x) {
+; LA32-LABEL: select_slt_zero_constant1_constant2:
+; LA32: # %bb.0:
+; LA32-NEXT: move $a1, $a0
+; LA32-NEXT: ori $a0, $zero, 7
+; LA32-NEXT: bltz $a1, .LBB16_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: addi.w $a0, $zero, -3
+; LA32-NEXT: .LBB16_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_slt_zero_constant1_constant2:
+; LA64: # %bb.0:
+; LA64-NEXT: slti $a0, $a0, 0
+; LA64-NEXT: addi.w $a1, $zero, -3
+; LA64-NEXT: masknez $a1, $a1, $a0
+; LA64-NEXT: ori $a2, $zero, 7
+; LA64-NEXT: maskeqz $a0, $a2, $a0
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: ret
+ %cmp = icmp slt i32 %x, 0
+ %cond = select i1 %cmp, i32 7, i32 -3
+ ret i32 %cond
+}
More information about the llvm-commits
mailing list