[llvm] 3497124 - [RISCV] Add more tests for GORCI aliases in Zbp extension

Ben Shi via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 23 00:02:50 PST 2022


Author: Lian Wang
Date: 2022-02-23T08:02:33Z
New Revision: 3497124771aa3730073360afd6470bf57122fede

URL: https://github.com/llvm/llvm-project/commit/3497124771aa3730073360afd6470bf57122fede
DIFF: https://github.com/llvm/llvm-project/commit/3497124771aa3730073360afd6470bf57122fede.diff

LOG: [RISCV] Add more tests for GORCI aliases in Zbp extension

Supplement tests for some aliases of gorci.

RV32:
add orc4.h/orc2.h in rv32zbp.ll
add orc.h/orc16/orc8/orc4/orc2/orc in rv32zbp-intrinsic.ll

RV64:
add orc4.h/orc2.h in rv64zbp.ll
add orc.h/orc32/orc16/orc8/orc4/orc2/orc/orc16.w/orc8.w/
    orc4.w/orc2.w/orc.w in rv64zbp-intrinsic.ll

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D120388

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rv32zbp-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv32zbp.ll
    llvm/test/CodeGen/RISCV/rv64zbp-intrinsic.ll
    llvm/test/CodeGen/RISCV/rv64zbp.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/RISCV/rv32zbp-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbp-intrinsic.ll
index 475d3b5460993..f7f9f8ad7e21f 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbp-intrinsic.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbp-intrinsic.ll
@@ -107,6 +107,60 @@ define i32 @gorci32(i32 %a) nounwind {
  ret i32 %tmp
 }
 
+define i32 @orchi32(i32 %a) nounwind {
+; RV32ZBP-LABEL: orchi32:
+; RV32ZBP:       # %bb.0:
+; RV32ZBP-NEXT:    orc.h a0, a0
+; RV32ZBP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 15)
+ ret i32 %tmp
+}
+
+define i32 @orc16i32(i32 %a) nounwind {
+; RV32ZBP-LABEL: orc16i32:
+; RV32ZBP:       # %bb.0:
+; RV32ZBP-NEXT:    orc16 a0, a0
+; RV32ZBP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 16)
+ ret i32 %tmp
+}
+
+define i32 @orc8i32(i32 %a) nounwind {
+; RV32ZBP-LABEL: orc8i32:
+; RV32ZBP:       # %bb.0:
+; RV32ZBP-NEXT:    orc8 a0, a0
+; RV32ZBP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 24)
+ ret i32 %tmp
+}
+
+define i32 @orc4i32(i32 %a) nounwind {
+; RV32ZBP-LABEL: orc4i32:
+; RV32ZBP:       # %bb.0:
+; RV32ZBP-NEXT:    orc4 a0, a0
+; RV32ZBP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 28)
+ ret i32 %tmp
+}
+
+define i32 @orc2i32(i32 %a) nounwind {
+; RV32ZBP-LABEL: orc2i32:
+; RV32ZBP:       # %bb.0:
+; RV32ZBP-NEXT:    orc2 a0, a0
+; RV32ZBP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 30)
+ ret i32 %tmp
+}
+
+define i32 @orci32(i32 %a) nounwind {
+; RV32ZBP-LABEL: orci32:
+; RV32ZBP:       # %bb.0:
+; RV32ZBP-NEXT:    orc a0, a0
+; RV32ZBP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.gorc.i32(i32 %a, i32 31)
+ ret i32 %tmp
+}
+
 declare i32 @llvm.riscv.shfl.i32(i32 %a, i32 %b)
 
 define i32 @shfl32(i32 %a, i32 %b) nounwind {

diff  --git a/llvm/test/CodeGen/RISCV/rv32zbp.ll b/llvm/test/CodeGen/RISCV/rv32zbp.ll
index 7203aeb2a99b6..76464a5024484 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbp.ll
@@ -698,6 +698,246 @@ define i64 @gorc8_i64(i64 %a) nounwind {
   ret i64 %or2
 }
 
+define i32 @gorc12_i32(i32 %a) nounwind {
+; RV32I-LABEL: gorc12_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 4
+; RV32I-NEXT:    lui a2, 986895
+; RV32I-NEXT:    addi a2, a2, 240
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 4
+; RV32I-NEXT:    lui a3, 61681
+; RV32I-NEXT:    addi a3, a3, -241
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 8
+; RV32I-NEXT:    lui a2, 1044496
+; RV32I-NEXT:    addi a2, a2, -256
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 8
+; RV32I-NEXT:    lui a3, 4080
+; RV32I-NEXT:    addi a3, a3, 255
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBP-LABEL: gorc12_i32:
+; RV32ZBP:       # %bb.0:
+; RV32ZBP-NEXT:    orc4.h a0, a0
+; RV32ZBP-NEXT:    ret
+  %and1 = shl i32 %a, 4
+  %shl1 = and i32 %and1, -252645136
+  %and1b = lshr i32 %a, 4
+  %shr1 = and i32 %and1b, 252645135
+  %or1 = or i32 %shr1, %a
+  %or1b = or i32 %or1, %shl1
+  %and2 = shl i32 %or1b, 8
+  %shl2 = and i32 %and2, -16711936
+  %and2b = lshr i32 %or1b, 8
+  %shr2 = and i32 %and2b, 16711935
+  %or2 = or i32 %shr2, %or1b
+  %or2b = or i32 %or2, %shl2
+  ret i32 %or2b
+}
+
+define i64 @gorc12_i64(i64 %a) nounwind {
+; RV32I-LABEL: gorc12_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a1, 4
+; RV32I-NEXT:    slli a3, a0, 4
+; RV32I-NEXT:    lui a4, 986895
+; RV32I-NEXT:    addi a4, a4, 240
+; RV32I-NEXT:    and a3, a3, a4
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    srli a4, a0, 4
+; RV32I-NEXT:    srli a5, a1, 4
+; RV32I-NEXT:    lui a6, 61681
+; RV32I-NEXT:    addi a6, a6, -241
+; RV32I-NEXT:    and a5, a5, a6
+; RV32I-NEXT:    and a4, a4, a6
+; RV32I-NEXT:    or a0, a4, a0
+; RV32I-NEXT:    or a1, a5, a1
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    slli a2, a0, 8
+; RV32I-NEXT:    slli a3, a1, 8
+; RV32I-NEXT:    lui a4, 1044496
+; RV32I-NEXT:    addi a4, a4, -256
+; RV32I-NEXT:    and a3, a3, a4
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    srli a4, a1, 8
+; RV32I-NEXT:    srli a5, a0, 8
+; RV32I-NEXT:    lui a6, 4080
+; RV32I-NEXT:    addi a6, a6, 255
+; RV32I-NEXT:    and a5, a5, a6
+; RV32I-NEXT:    and a4, a4, a6
+; RV32I-NEXT:    or a1, a4, a1
+; RV32I-NEXT:    or a0, a5, a0
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    ret
+;
+; RV32ZBP-LABEL: gorc12_i64:
+; RV32ZBP:       # %bb.0:
+; RV32ZBP-NEXT:    orc4.h a0, a0
+; RV32ZBP-NEXT:    orc4.h a1, a1
+; RV32ZBP-NEXT:    ret
+  %and1 = shl i64 %a, 4
+  %shl1 = and i64 %and1, -1085102592571150096
+  %and1b = lshr i64 %a, 4
+  %shr1 = and i64 %and1b, 1085102592571150095
+  %or1 = or i64 %shr1, %a
+  %or1b = or i64 %or1, %shl1
+  %and2 = shl i64 %or1b, 8
+  %shl2 = and i64 %and2, -71777214294589696
+  %and2b = lshr i64 %or1b, 8
+  %shr2 = and i64 %and2b, 71777214294589695
+  %or2 = or i64 %shr2, %or1b
+  %or2b = or i64 %or2, %shl2
+  ret i64 %or2b
+}
+
+define i32 @gorc14_i32(i32 %a) nounwind {
+; RV32I-LABEL: gorc14_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 2
+; RV32I-NEXT:    lui a2, 838861
+; RV32I-NEXT:    addi a2, a2, -820
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 2
+; RV32I-NEXT:    lui a3, 209715
+; RV32I-NEXT:    addi a3, a3, 819
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 4
+; RV32I-NEXT:    lui a2, 986895
+; RV32I-NEXT:    addi a2, a2, 240
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 4
+; RV32I-NEXT:    lui a3, 61681
+; RV32I-NEXT:    addi a3, a3, -241
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 8
+; RV32I-NEXT:    lui a2, 1044496
+; RV32I-NEXT:    addi a2, a2, -256
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 8
+; RV32I-NEXT:    lui a3, 4080
+; RV32I-NEXT:    addi a3, a3, 255
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBP-LABEL: gorc14_i32:
+; RV32ZBP:       # %bb.0:
+; RV32ZBP-NEXT:    orc2.h a0, a0
+; RV32ZBP-NEXT:    ret
+  %and1 = shl i32 %a, 2
+  %shl1 = and i32 %and1, -858993460
+  %and1b = lshr i32 %a, 2
+  %shr1 = and i32 %and1b, 858993459
+  %or1 = or i32 %shr1, %a
+  %or1b = or i32 %or1, %shl1
+  %and2 = shl i32 %or1b, 4
+  %shl2 = and i32 %and2, -252645136
+  %and2b = lshr i32 %or1b, 4
+  %shr2 = and i32 %and2b, 252645135
+  %or2 = or i32 %shr2, %or1b
+  %or2b = or i32 %or2, %shl2
+  %and3 = shl i32 %or2b, 8
+  %shl3 = and i32 %and3, -16711936
+  %and3b = lshr i32 %or2b, 8
+  %shr3 = and i32 %and3b, 16711935
+  %or3 = or i32 %shr3, %or2b
+  %or3b = or i32 %or3, %shl3
+  ret i32 %or3b
+}
+
+define i64 @gorc14_i64(i64 %a) nounwind {
+; RV32I-LABEL: gorc14_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a0, 2
+; RV32I-NEXT:    slli a3, a1, 2
+; RV32I-NEXT:    lui a4, 838861
+; RV32I-NEXT:    addi a4, a4, -820
+; RV32I-NEXT:    and a3, a3, a4
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    srli a4, a1, 2
+; RV32I-NEXT:    srli a5, a0, 2
+; RV32I-NEXT:    lui a6, 209715
+; RV32I-NEXT:    addi a6, a6, 819
+; RV32I-NEXT:    and a5, a5, a6
+; RV32I-NEXT:    and a4, a4, a6
+; RV32I-NEXT:    or a1, a4, a1
+; RV32I-NEXT:    or a0, a5, a0
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    slli a2, a1, 4
+; RV32I-NEXT:    slli a3, a0, 4
+; RV32I-NEXT:    lui a4, 986895
+; RV32I-NEXT:    addi a4, a4, 240
+; RV32I-NEXT:    and a3, a3, a4
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    srli a4, a0, 4
+; RV32I-NEXT:    srli a5, a1, 4
+; RV32I-NEXT:    lui a6, 61681
+; RV32I-NEXT:    addi a6, a6, -241
+; RV32I-NEXT:    and a5, a5, a6
+; RV32I-NEXT:    and a4, a4, a6
+; RV32I-NEXT:    or a0, a4, a0
+; RV32I-NEXT:    or a1, a5, a1
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    slli a2, a0, 8
+; RV32I-NEXT:    slli a3, a1, 8
+; RV32I-NEXT:    lui a4, 1044496
+; RV32I-NEXT:    addi a4, a4, -256
+; RV32I-NEXT:    and a3, a3, a4
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    srli a4, a1, 8
+; RV32I-NEXT:    srli a5, a0, 8
+; RV32I-NEXT:    lui a6, 4080
+; RV32I-NEXT:    addi a6, a6, 255
+; RV32I-NEXT:    and a5, a5, a6
+; RV32I-NEXT:    and a4, a4, a6
+; RV32I-NEXT:    or a1, a4, a1
+; RV32I-NEXT:    or a0, a5, a0
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    ret
+;
+; RV32ZBP-LABEL: gorc14_i64:
+; RV32ZBP:       # %bb.0:
+; RV32ZBP-NEXT:    orc2.h a0, a0
+; RV32ZBP-NEXT:    orc2.h a1, a1
+; RV32ZBP-NEXT:    ret
+  %and1 = shl i64 %a, 2
+  %shl1 = and i64 %and1, -3689348814741910324
+  %and1b = lshr i64 %a, 2
+  %shr1 = and i64 %and1b, 3689348814741910323
+  %or1 = or i64 %shr1, %a
+  %or1b = or i64 %or1, %shl1
+  %and2 = shl i64 %or1b, 4
+  %shl2 = and i64 %and2, -1085102592571150096
+  %and2b = lshr i64 %or1b, 4
+  %shr2 = and i64 %and2b, 1085102592571150095
+  %or2 = or i64 %shr2, %or1b
+  %or2b = or i64 %or2, %shl2
+  %and3 = shl i64 %or2b, 8
+  %shl3 = and i64 %and3, -71777214294589696
+  %and3b = lshr i64 %or2b, 8
+  %shr3 = and i64 %and3b, 71777214294589695
+  %or3 = or i64 %shr3, %or2b
+  %or3b = or i64 %or3, %shl3
+  ret i64 %or3b
+}
+
 define i32 @gorc16_i32(i32 %a) nounwind {
 ; RV32I-LABEL: gorc16_i32:
 ; RV32I:       # %bb.0:

diff  --git a/llvm/test/CodeGen/RISCV/rv64zbp-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbp-intrinsic.ll
index b82d520efbfa0..89130bc1dc4ef 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbp-intrinsic.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbp-intrinsic.ll
@@ -482,6 +482,114 @@ define i64 @gorci64(i64 %a) nounwind {
  ret i64 %tmp
 }
 
+define i64 @orchi64(i64 %a) nounwind {
+; RV64ZBP-LABEL: orchi64:
+; RV64ZBP:       # %bb.0:
+; RV64ZBP-NEXT:    orc.h a0, a0
+; RV64ZBP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 15)
+ ret i64 %tmp
+}
+
+define i64 @orc16wi64(i64 %a) nounwind {
+; RV64ZBP-LABEL: orc16wi64:
+; RV64ZBP:       # %bb.0:
+; RV64ZBP-NEXT:    orc16.w a0, a0
+; RV64ZBP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 16)
+ ret i64 %tmp
+}
+
+define i64 @orc8wi64(i64 %a) nounwind {
+; RV64ZBP-LABEL: orc8wi64:
+; RV64ZBP:       # %bb.0:
+; RV64ZBP-NEXT:    orc8.w a0, a0
+; RV64ZBP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 24)
+ ret i64 %tmp
+}
+
+define i64 @orc4wi64(i64 %a) nounwind {
+; RV64ZBP-LABEL: orc4wi64:
+; RV64ZBP:       # %bb.0:
+; RV64ZBP-NEXT:    orc4.w a0, a0
+; RV64ZBP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 28)
+ ret i64 %tmp
+}
+
+define i64 @orc2wi64(i64 %a) nounwind {
+; RV64ZBP-LABEL: orc2wi64:
+; RV64ZBP:       # %bb.0:
+; RV64ZBP-NEXT:    orc2.w a0, a0
+; RV64ZBP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 30)
+ ret i64 %tmp
+}
+
+define i64 @orcwi64(i64 %a) nounwind {
+; RV64ZBP-LABEL: orcwi64:
+; RV64ZBP:       # %bb.0:
+; RV64ZBP-NEXT:    orc.w a0, a0
+; RV64ZBP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 31)
+ ret i64 %tmp
+}
+
+define i64 @orc32i64(i64 %a) nounwind {
+; RV64ZBP-LABEL: orc32i64:
+; RV64ZBP:       # %bb.0:
+; RV64ZBP-NEXT:    orc32 a0, a0
+; RV64ZBP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 32)
+ ret i64 %tmp
+}
+
+define i64 @orc16i64(i64 %a) nounwind {
+; RV64ZBP-LABEL: orc16i64:
+; RV64ZBP:       # %bb.0:
+; RV64ZBP-NEXT:    orc16 a0, a0
+; RV64ZBP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 48)
+ ret i64 %tmp
+}
+
+define i64 @orc8i64(i64 %a) nounwind {
+; RV64ZBP-LABEL: orc8i64:
+; RV64ZBP:       # %bb.0:
+; RV64ZBP-NEXT:    orc8 a0, a0
+; RV64ZBP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 56)
+ ret i64 %tmp
+}
+
+define i64 @orc4i64(i64 %a) nounwind {
+; RV64ZBP-LABEL: orc4i64:
+; RV64ZBP:       # %bb.0:
+; RV64ZBP-NEXT:    orc4 a0, a0
+; RV64ZBP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 60)
+ ret i64 %tmp
+}
+
+define i64 @orc2i64(i64 %a) nounwind {
+; RV64ZBP-LABEL: orc2i64:
+; RV64ZBP:       # %bb.0:
+; RV64ZBP-NEXT:    orc2 a0, a0
+; RV64ZBP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 62)
+ ret i64 %tmp
+}
+
+define i64 @orci64(i64 %a) nounwind {
+; RV64ZBP-LABEL: orci64:
+; RV64ZBP:       # %bb.0:
+; RV64ZBP-NEXT:    orc a0, a0
+; RV64ZBP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.gorc.i64(i64 %a, i64 63)
+ ret i64 %tmp
+}
+
 declare i64 @llvm.riscv.shfl.i64(i64 %a, i64 %b)
 
 define i64 @shfl64(i64 %a, i64 %b) nounwind {

diff  --git a/llvm/test/CodeGen/RISCV/rv64zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbp.ll
index 552991076629e..7210e2d41f686 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbp.ll
@@ -613,6 +613,215 @@ define i64 @gorc8_i64(i64 %a) nounwind {
   ret i64 %or2
 }
 
+define signext i32 @gorc12_i32(i32 signext %a) nounwind {
+; RV64I-LABEL: gorc12_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a1, a0, 4
+; RV64I-NEXT:    lui a2, 986895
+; RV64I-NEXT:    addiw a2, a2, 240
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 4
+; RV64I-NEXT:    lui a3, 61681
+; RV64I-NEXT:    addiw a3, a3, -241
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slliw a1, a0, 8
+; RV64I-NEXT:    lui a2, 1044496
+; RV64I-NEXT:    addiw a2, a2, -256
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 8
+; RV64I-NEXT:    lui a3, 4080
+; RV64I-NEXT:    addiw a3, a3, 255
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBP-LABEL: gorc12_i32:
+; RV64ZBP:       # %bb.0:
+; RV64ZBP-NEXT:    gorciw a0, a0, 12
+; RV64ZBP-NEXT:    ret
+  %and1 = shl i32 %a, 4
+  %shl1 = and i32 %and1, -252645136
+  %and1b = lshr i32 %a, 4
+  %shr1 = and i32 %and1b, 252645135
+  %or1 = or i32 %shr1, %a
+  %or1b = or i32 %or1, %shl1
+  %and2 = shl i32 %or1b, 8
+  %shl2 = and i32 %and2, -16711936
+  %and2b = lshr i32 %or1b, 8
+  %shr2 = and i32 %and2b, 16711935
+  %or2 = or i32 %shr2, %or1b
+  %or2b = or i32 %or2, %shl2
+  ret i32 %or2b
+}
+
+define i64 @gorc12_i64(i64 %a) nounwind {
+; RV64I-LABEL: gorc12_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, %hi(.LCPI17_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI17_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI17_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI17_1)(a2)
+; RV64I-NEXT:    slli a3, a0, 4
+; RV64I-NEXT:    and a1, a3, a1
+; RV64I-NEXT:    srli a3, a0, 4
+; RV64I-NEXT:    and a2, a3, a2
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    lui a1, %hi(.LCPI17_2)
+; RV64I-NEXT:    ld a1, %lo(.LCPI17_2)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI17_3)
+; RV64I-NEXT:    ld a2, %lo(.LCPI17_3)(a2)
+; RV64I-NEXT:    slli a3, a0, 8
+; RV64I-NEXT:    and a1, a3, a1
+; RV64I-NEXT:    srli a3, a0, 8
+; RV64I-NEXT:    and a2, a3, a2
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBP-LABEL: gorc12_i64:
+; RV64ZBP:       # %bb.0:
+; RV64ZBP-NEXT:    orc4.h a0, a0
+; RV64ZBP-NEXT:    ret
+  %and1 = shl i64 %a, 4
+  %shl1 = and i64 %and1, -1085102592571150096
+  %and1b = lshr i64 %a, 4
+  %shr1 = and i64 %and1b, 1085102592571150095
+  %or1 = or i64 %shr1, %a
+  %or1b = or i64 %or1, %shl1
+  %and2 = shl i64 %or1b, 8
+  %shl2 = and i64 %and2, -71777214294589696
+  %and2b = lshr i64 %or1b, 8
+  %shr2 = and i64 %and2b, 71777214294589695
+  %or2 = or i64 %shr2, %or1b
+  %or2b = or i64 %or2, %shl2
+  ret i64 %or2b
+}
+
+define signext i32 @gorc14_i32(i32 signext %a) nounwind {
+; RV64I-LABEL: gorc14_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a1, a0, 2
+; RV64I-NEXT:    lui a2, 838861
+; RV64I-NEXT:    addiw a2, a2, -820
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 2
+; RV64I-NEXT:    lui a3, 209715
+; RV64I-NEXT:    addiw a3, a3, 819
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slliw a1, a0, 4
+; RV64I-NEXT:    lui a2, 986895
+; RV64I-NEXT:    addiw a2, a2, 240
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 4
+; RV64I-NEXT:    lui a3, 61681
+; RV64I-NEXT:    addiw a3, a3, -241
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 8
+; RV64I-NEXT:    lui a2, 1044496
+; RV64I-NEXT:    addiw a2, a2, -256
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 8
+; RV64I-NEXT:    lui a3, 4080
+; RV64I-NEXT:    addiw a3, a3, 255
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBP-LABEL: gorc14_i32:
+; RV64ZBP:       # %bb.0:
+; RV64ZBP-NEXT:    gorciw a0, a0, 14
+; RV64ZBP-NEXT:    ret
+  %and1 = shl i32 %a, 2
+  %shl1 = and i32 %and1, -858993460
+  %and1b = lshr i32 %a, 2
+  %shr1 = and i32 %and1b, 858993459
+  %or1 = or i32 %shr1, %a
+  %or1b = or i32 %or1, %shl1
+  %and2 = shl i32 %or1b, 4
+  %shl2 = and i32 %and2, -252645136
+  %and2b = lshr i32 %or1b, 4
+  %shr2 = and i32 %and2b, 252645135
+  %or2 = or i32 %shr2, %or1b
+  %or2b = or i32 %or2, %shl2
+  %and3 = shl i32 %or2b, 8
+  %shl3 = and i32 %and3, -16711936
+  %and3b = lshr i32 %or2b, 8
+  %shr3 = and i32 %and3b, 16711935
+  %or3 = or i32 %shr3, %or2b
+  %or3b = or i32 %or3, %shl3
+  ret i32 %or3b
+}
+
+define i64 @gorc14_i64(i64 %a) nounwind {
+; RV64I-LABEL: gorc14_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, %hi(.LCPI19_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI19_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI19_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI19_1)(a2)
+; RV64I-NEXT:    slli a3, a0, 2
+; RV64I-NEXT:    and a1, a3, a1
+; RV64I-NEXT:    srli a3, a0, 2
+; RV64I-NEXT:    and a2, a3, a2
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    lui a1, %hi(.LCPI19_2)
+; RV64I-NEXT:    ld a1, %lo(.LCPI19_2)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI19_3)
+; RV64I-NEXT:    ld a2, %lo(.LCPI19_3)(a2)
+; RV64I-NEXT:    slli a3, a0, 4
+; RV64I-NEXT:    and a1, a3, a1
+; RV64I-NEXT:    srli a3, a0, 4
+; RV64I-NEXT:    and a2, a3, a2
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    lui a1, %hi(.LCPI19_4)
+; RV64I-NEXT:    ld a1, %lo(.LCPI19_4)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI19_5)
+; RV64I-NEXT:    ld a2, %lo(.LCPI19_5)(a2)
+; RV64I-NEXT:    slli a3, a0, 8
+; RV64I-NEXT:    and a1, a3, a1
+; RV64I-NEXT:    srli a3, a0, 8
+; RV64I-NEXT:    and a2, a3, a2
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBP-LABEL: gorc14_i64:
+; RV64ZBP:       # %bb.0:
+; RV64ZBP-NEXT:    orc2.h a0, a0
+; RV64ZBP-NEXT:    ret
+  %and1 = shl i64 %a, 2
+  %shl1 = and i64 %and1, -3689348814741910324
+  %and1b = lshr i64 %a, 2
+  %shr1 = and i64 %and1b, 3689348814741910323
+  %or1 = or i64 %shr1, %a
+  %or1b = or i64 %or1, %shl1
+  %and2 = shl i64 %or1b, 4
+  %shl2 = and i64 %and2, -1085102592571150096
+  %and2b = lshr i64 %or1b, 4
+  %shr2 = and i64 %and2b, 1085102592571150095
+  %or2 = or i64 %shr2, %or1b
+  %or2b = or i64 %or2, %shl2
+  %and3 = shl i64 %or2b, 8
+  %shl3 = and i64 %and3, -71777214294589696
+  %and3b = lshr i64 %or2b, 8
+  %shr3 = and i64 %and3b, 71777214294589695
+  %or3 = or i64 %shr3, %or2b
+  %or3b = or i64 %or3, %shl3
+  ret i64 %or3b
+}
+
 define signext i32 @gorc16_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: gorc16_i32:
 ; RV64I:       # %bb.0:
@@ -765,10 +974,10 @@ define signext i32 @gorc2b_i32(i32 signext %a) nounwind {
 define i64 @gorc2b_i64(i64 %a) nounwind {
 ; RV64I-LABEL: gorc2b_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI22_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI22_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI22_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI22_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI26_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI26_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI26_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI26_1)(a2)
 ; RV64I-NEXT:    slli a3, a0, 2
 ; RV64I-NEXT:    and a3, a3, a1
 ; RV64I-NEXT:    srli a4, a0, 2
@@ -864,20 +1073,20 @@ define signext i32 @gorc3b_i32(i32 signext %a) nounwind {
 define i64 @gorc3b_i64(i64 %a) nounwind {
 ; RV64I-LABEL: gorc3b_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI24_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI24_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI24_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI24_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI28_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI28_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI28_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI28_1)(a2)
 ; RV64I-NEXT:    slli a3, a0, 1
 ; RV64I-NEXT:    and a3, a3, a1
 ; RV64I-NEXT:    srli a4, a0, 1
 ; RV64I-NEXT:    and a4, a4, a2
 ; RV64I-NEXT:    or a0, a4, a0
 ; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    lui a3, %hi(.LCPI24_2)
-; RV64I-NEXT:    ld a3, %lo(.LCPI24_2)(a3)
-; RV64I-NEXT:    lui a4, %hi(.LCPI24_3)
-; RV64I-NEXT:    ld a4, %lo(.LCPI24_3)(a4)
+; RV64I-NEXT:    lui a3, %hi(.LCPI28_2)
+; RV64I-NEXT:    ld a3, %lo(.LCPI28_2)(a3)
+; RV64I-NEXT:    lui a4, %hi(.LCPI28_3)
+; RV64I-NEXT:    ld a4, %lo(.LCPI28_3)(a4)
 ; RV64I-NEXT:    slli a5, a0, 2
 ; RV64I-NEXT:    and a3, a5, a3
 ; RV64I-NEXT:    srli a5, a0, 2
@@ -982,10 +1191,10 @@ define signext i32 @grev1_i32(i32 signext %a) nounwind {
 define i64 @grev1_i64(i64 %a) nounwind {
 ; RV64I-LABEL: grev1_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI28_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI28_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI28_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI28_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI32_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI32_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI32_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI32_1)(a2)
 ; RV64I-NEXT:    slli a3, a0, 1
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 1
@@ -1034,10 +1243,10 @@ define signext i32 @grev2_i32(i32 signext %a) nounwind {
 define i64 @grev2_i64(i64 %a) nounwind {
 ; RV64I-LABEL: grev2_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI30_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI30_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI30_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI30_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI34_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI34_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI34_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI34_1)(a2)
 ; RV64I-NEXT:    slli a3, a0, 2
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 2
@@ -1100,19 +1309,19 @@ define signext i32 @grev3_i32(i32 signext %a) nounwind {
 define i64 @grev3_i64(i64 %a) nounwind {
 ; RV64I-LABEL: grev3_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI32_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI32_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI32_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI32_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI36_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI36_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI36_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI36_1)(a2)
 ; RV64I-NEXT:    slli a3, a0, 1
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI32_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI32_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI32_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI32_3)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI36_2)
+; RV64I-NEXT:    ld a1, %lo(.LCPI36_2)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI36_3)
+; RV64I-NEXT:    ld a2, %lo(.LCPI36_3)(a2)
 ; RV64I-NEXT:    slli a3, a0, 2
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 2
@@ -1166,10 +1375,10 @@ define signext i32 @grev4_i32(i32 signext %a) nounwind {
 define i64 @grev4_i64(i64 %a) nounwind {
 ; RV64I-LABEL: grev4_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI34_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI34_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI34_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI34_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI38_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI38_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI38_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI38_1)(a2)
 ; RV64I-NEXT:    slli a3, a0, 4
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 4
@@ -1232,19 +1441,19 @@ define signext i32 @grev5_i32(i32 signext %a) nounwind {
 define i64 @grev5_i64(i64 %a) nounwind {
 ; RV64I-LABEL: grev5_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI36_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI36_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI36_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI36_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI40_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI40_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI40_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI40_1)(a2)
 ; RV64I-NEXT:    slli a3, a0, 1
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI36_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI36_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI36_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI36_3)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI40_2)
+; RV64I-NEXT:    ld a1, %lo(.LCPI40_2)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI40_3)
+; RV64I-NEXT:    ld a2, %lo(.LCPI40_3)(a2)
 ; RV64I-NEXT:    slli a3, a0, 4
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 4
@@ -1313,19 +1522,19 @@ define signext i32 @grev6_i32(i32 signext %a) nounwind {
 define i64 @grev6_i64(i64 %a) nounwind {
 ; RV64I-LABEL: grev6_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI38_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI38_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI38_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI38_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI42_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI42_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI42_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI42_1)(a2)
 ; RV64I-NEXT:    slli a3, a0, 2
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 2
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI38_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI38_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI38_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI38_3)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI42_2)
+; RV64I-NEXT:    ld a1, %lo(.LCPI42_2)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI42_3)
+; RV64I-NEXT:    ld a2, %lo(.LCPI42_3)(a2)
 ; RV64I-NEXT:    slli a3, a0, 4
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 4
@@ -1407,28 +1616,28 @@ define signext i32 @grev7_i32(i32 signext %a) nounwind {
 define i64 @grev7_i64(i64 %a) nounwind {
 ; RV64I-LABEL: grev7_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI40_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI40_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI40_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI40_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI44_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI44_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI44_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI44_1)(a2)
 ; RV64I-NEXT:    slli a3, a0, 1
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI40_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI40_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI40_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI40_3)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI44_2)
+; RV64I-NEXT:    ld a1, %lo(.LCPI44_2)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI44_3)
+; RV64I-NEXT:    ld a2, %lo(.LCPI44_3)(a2)
 ; RV64I-NEXT:    slli a3, a0, 2
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 2
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI40_4)
-; RV64I-NEXT:    ld a1, %lo(.LCPI40_4)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI40_5)
-; RV64I-NEXT:    ld a2, %lo(.LCPI40_5)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI44_4)
+; RV64I-NEXT:    ld a1, %lo(.LCPI44_4)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI44_5)
+; RV64I-NEXT:    ld a2, %lo(.LCPI44_5)(a2)
 ; RV64I-NEXT:    slli a3, a0, 4
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 4
@@ -1487,10 +1696,10 @@ define signext i32 @grev8_i32(i32 signext %a) nounwind {
 define i64 @grev8_i64(i64 %a) nounwind {
 ; RV64I-LABEL: grev8_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI42_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI42_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI42_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI42_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI46_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI46_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI46_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI46_1)(a2)
 ; RV64I-NEXT:    slli a3, a0, 8
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 8
@@ -1553,19 +1762,19 @@ define signext i32 @grev12_i32(i32 signext %a) nounwind {
 define i64 @grev12_i64(i64 %a) nounwind {
 ; RV64I-LABEL: grev12_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI44_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI44_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI44_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI44_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI48_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI48_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI48_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI48_1)(a2)
 ; RV64I-NEXT:    slli a3, a0, 4
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 4
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI44_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI44_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI44_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI44_3)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI48_2)
+; RV64I-NEXT:    ld a1, %lo(.LCPI48_2)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI48_3)
+; RV64I-NEXT:    ld a2, %lo(.LCPI48_3)(a2)
 ; RV64I-NEXT:    slli a3, a0, 8
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 8
@@ -1647,28 +1856,28 @@ define signext i32 @grev14_i32(i32 signext %a) nounwind {
 define i64 @grev14_i64(i64 %a) nounwind {
 ; RV64I-LABEL: grev14_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI46_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI46_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI46_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI46_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI50_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI50_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI50_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI50_1)(a2)
 ; RV64I-NEXT:    slli a3, a0, 2
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 2
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI46_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI46_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI46_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI46_3)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI50_2)
+; RV64I-NEXT:    ld a1, %lo(.LCPI50_2)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI50_3)
+; RV64I-NEXT:    ld a2, %lo(.LCPI50_3)(a2)
 ; RV64I-NEXT:    slli a3, a0, 4
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 4
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI46_4)
-; RV64I-NEXT:    ld a1, %lo(.LCPI46_4)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI46_5)
-; RV64I-NEXT:    ld a2, %lo(.LCPI46_5)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI50_4)
+; RV64I-NEXT:    ld a1, %lo(.LCPI50_4)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI50_5)
+; RV64I-NEXT:    ld a2, %lo(.LCPI50_5)(a2)
 ; RV64I-NEXT:    slli a3, a0, 8
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 8
@@ -1841,19 +2050,19 @@ define signext i32 @grev3b_i32(i32 signext %a) nounwind {
 define i64 @grev3b_i64(i64 %a) nounwind {
 ; RV64I-LABEL: grev3b_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI53_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI53_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI53_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI53_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI57_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI57_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI57_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI57_1)(a2)
 ; RV64I-NEXT:    slli a3, a0, 2
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 2
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    lui a1, %hi(.LCPI53_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI53_2)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI53_3)
-; RV64I-NEXT:    ld a2, %lo(.LCPI53_3)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI57_2)
+; RV64I-NEXT:    ld a1, %lo(.LCPI57_2)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI57_3)
+; RV64I-NEXT:    ld a2, %lo(.LCPI57_3)(a2)
 ; RV64I-NEXT:    slli a3, a0, 1
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 1
@@ -1933,19 +2142,19 @@ define signext i32 @grev2b_i32(i32 signext %a) nounwind {
 define i64 @grev2b_i64(i64 %a) nounwind {
 ; RV64I-LABEL: grev2b_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI55_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI55_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI55_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI55_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI59_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI59_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI59_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI59_1)(a2)
 ; RV64I-NEXT:    slli a3, a0, 1
 ; RV64I-NEXT:    and a3, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    or a0, a3, a0
-; RV64I-NEXT:    lui a3, %hi(.LCPI55_2)
-; RV64I-NEXT:    ld a3, %lo(.LCPI55_2)(a3)
-; RV64I-NEXT:    lui a4, %hi(.LCPI55_3)
-; RV64I-NEXT:    ld a4, %lo(.LCPI55_3)(a4)
+; RV64I-NEXT:    lui a3, %hi(.LCPI59_2)
+; RV64I-NEXT:    ld a3, %lo(.LCPI59_2)(a3)
+; RV64I-NEXT:    lui a4, %hi(.LCPI59_3)
+; RV64I-NEXT:    ld a4, %lo(.LCPI59_3)(a4)
 ; RV64I-NEXT:    slli a5, a0, 2
 ; RV64I-NEXT:    and a3, a5, a3
 ; RV64I-NEXT:    srli a0, a0, 2
@@ -2044,19 +2253,19 @@ define signext i32 @grev0_i32(i32 signext %a) nounwind {
 define i64 @grev0_i64(i64 %a) nounwind {
 ; RV64I-LABEL: grev0_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI57_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI57_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI57_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI57_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI61_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI61_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI61_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI61_1)(a2)
 ; RV64I-NEXT:    slli a3, a0, 1
 ; RV64I-NEXT:    and a3, a3, a1
 ; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    or a0, a3, a0
-; RV64I-NEXT:    lui a3, %hi(.LCPI57_2)
-; RV64I-NEXT:    ld a3, %lo(.LCPI57_2)(a3)
-; RV64I-NEXT:    lui a4, %hi(.LCPI57_3)
-; RV64I-NEXT:    ld a4, %lo(.LCPI57_3)(a4)
+; RV64I-NEXT:    lui a3, %hi(.LCPI61_2)
+; RV64I-NEXT:    ld a3, %lo(.LCPI61_2)(a3)
+; RV64I-NEXT:    lui a4, %hi(.LCPI61_3)
+; RV64I-NEXT:    ld a4, %lo(.LCPI61_3)(a4)
 ; RV64I-NEXT:    slli a5, a0, 2
 ; RV64I-NEXT:    and a5, a5, a3
 ; RV64I-NEXT:    srli a0, a0, 2
@@ -2451,22 +2660,22 @@ define i64 @bitreverse_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    and a3, a4, a3
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    lui a3, %hi(.LCPI68_0)
-; RV64I-NEXT:    ld a3, %lo(.LCPI68_0)(a3)
+; RV64I-NEXT:    lui a3, %hi(.LCPI72_0)
+; RV64I-NEXT:    ld a3, %lo(.LCPI72_0)(a3)
 ; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    srli a1, a0, 4
 ; RV64I-NEXT:    and a1, a1, a3
 ; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    lui a2, %hi(.LCPI68_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI68_1)(a2)
+; RV64I-NEXT:    lui a2, %hi(.LCPI72_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI72_1)(a2)
 ; RV64I-NEXT:    slli a0, a0, 4
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    srli a1, a0, 2
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a2, %hi(.LCPI68_2)
-; RV64I-NEXT:    ld a2, %lo(.LCPI68_2)(a2)
+; RV64I-NEXT:    lui a2, %hi(.LCPI72_2)
+; RV64I-NEXT:    ld a2, %lo(.LCPI72_2)(a2)
 ; RV64I-NEXT:    slli a0, a0, 2
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    srli a1, a0, 1
@@ -2574,20 +2783,20 @@ define i32 @bitreverse_bswap_i32(i32 %a) {
 define i64 @bitreverse_bswap_i64(i64 %a) {
 ; RV64I-LABEL: bitreverse_bswap_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI72_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI72_0)(a1)
+; RV64I-NEXT:    lui a1, %hi(.LCPI76_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI76_0)(a1)
 ; RV64I-NEXT:    srli a2, a0, 4
 ; RV64I-NEXT:    and a2, a2, a1
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI72_1)
-; RV64I-NEXT:    ld a1, %lo(.LCPI72_1)(a1)
+; RV64I-NEXT:    lui a1, %hi(.LCPI76_1)
+; RV64I-NEXT:    ld a1, %lo(.LCPI76_1)(a1)
 ; RV64I-NEXT:    slli a0, a0, 4
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    srli a2, a0, 2
 ; RV64I-NEXT:    and a2, a2, a1
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI72_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI72_2)(a1)
+; RV64I-NEXT:    lui a1, %hi(.LCPI76_2)
+; RV64I-NEXT:    ld a1, %lo(.LCPI76_2)(a1)
 ; RV64I-NEXT:    slli a0, a0, 2
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    srli a2, a0, 1
@@ -2641,14 +2850,14 @@ define signext i32 @shfl1_i32(i32 signext %a, i32 signext %b) nounwind {
 define i64 @shfl1_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: shfl1_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI74_1)
-; RV64I-NEXT:    ld a1, %lo(.LCPI74_1)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI74_0)
-; RV64I-NEXT:    ld a2, %lo(.LCPI74_0)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI78_1)
+; RV64I-NEXT:    ld a1, %lo(.LCPI78_1)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI78_0)
+; RV64I-NEXT:    ld a2, %lo(.LCPI78_0)(a2)
 ; RV64I-NEXT:    slli a3, a0, 1
 ; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    lui a3, %hi(.LCPI74_2)
-; RV64I-NEXT:    ld a3, %lo(.LCPI74_2)(a3)
+; RV64I-NEXT:    lui a3, %hi(.LCPI78_2)
+; RV64I-NEXT:    ld a3, %lo(.LCPI78_2)(a3)
 ; RV64I-NEXT:    and a2, a0, a2
 ; RV64I-NEXT:    or a1, a2, a1
 ; RV64I-NEXT:    srli a0, a0, 1
@@ -2705,14 +2914,14 @@ define signext i32 @shfl2_i32(i32 signext %a, i32 signext %b) nounwind {
 define i64 @shfl2_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: shfl2_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI76_1)
-; RV64I-NEXT:    ld a1, %lo(.LCPI76_1)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI76_0)
-; RV64I-NEXT:    ld a2, %lo(.LCPI76_0)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI80_1)
+; RV64I-NEXT:    ld a1, %lo(.LCPI80_1)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI80_0)
+; RV64I-NEXT:    ld a2, %lo(.LCPI80_0)(a2)
 ; RV64I-NEXT:    slli a3, a0, 2
 ; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    lui a3, %hi(.LCPI76_2)
-; RV64I-NEXT:    ld a3, %lo(.LCPI76_2)(a3)
+; RV64I-NEXT:    lui a3, %hi(.LCPI80_2)
+; RV64I-NEXT:    ld a3, %lo(.LCPI80_2)(a3)
 ; RV64I-NEXT:    and a2, a0, a2
 ; RV64I-NEXT:    or a1, a2, a1
 ; RV64I-NEXT:    srli a0, a0, 2
@@ -2769,13 +2978,13 @@ define signext i32 @shfl4_i32(i32 signext %a, i32 signext %b) nounwind {
 define i64 @shfl4_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: shfl4_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI78_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI78_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI78_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI78_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI82_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI82_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI82_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI82_1)(a2)
 ; RV64I-NEXT:    slli a3, a0, 4
-; RV64I-NEXT:    lui a4, %hi(.LCPI78_2)
-; RV64I-NEXT:    ld a4, %lo(.LCPI78_2)(a4)
+; RV64I-NEXT:    lui a4, %hi(.LCPI82_2)
+; RV64I-NEXT:    ld a4, %lo(.LCPI82_2)(a4)
 ; RV64I-NEXT:    and a2, a3, a2
 ; RV64I-NEXT:    and a1, a0, a1
 ; RV64I-NEXT:    srli a0, a0, 4


        


More information about the llvm-commits mailing list