[llvm] 21ded66 - [RISCV][GISel] Add zexti8 ComplexPattern.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 6 13:58:32 PST 2024


Author: Craig Topper
Date: 2024-11-06T13:58:25-08:00
New Revision: 21ded66dba0adfd34250df93b5321709883f5e94

URL: https://github.com/llvm/llvm-project/commit/21ded66dba0adfd34250df93b5321709883f5e94
DIFF: https://github.com/llvm/llvm-project/commit/21ded66dba0adfd34250df93b5321709883f5e94.diff

LOG: [RISCV][GISel] Add zexti8 ComplexPattern.

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVGISel.td
    llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll
    llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVGISel.td b/llvm/lib/Target/RISCV/RISCVGISel.td
index 4ba6dd05579cd2..36881b02da2e40 100644
--- a/llvm/lib/Target/RISCV/RISCVGISel.td
+++ b/llvm/lib/Target/RISCV/RISCVGISel.td
@@ -100,6 +100,8 @@ def gi_zexti32 : GIComplexOperandMatcher<s64, "selectZExtBits<32>">,
                  GIComplexPatternEquiv<zexti32>;
 def gi_zexti16 : GIComplexOperandMatcher<s32, "selectZExtBits<16>">,
                  GIComplexPatternEquiv<zexti16>;
+def gi_zexti8  : GIComplexOperandMatcher<s32, "selectZExtBits<8>">,
+                 GIComplexPatternEquiv<zexti8>;
 
 // FIXME: Canonicalize (sub X, C) -> (add X, -C) earlier.
 def : Pat<(XLenVT (sub GPR:$rs1, simm12Plus1:$imm)),

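For context: zexti8, like the existing zexti16/zexti32, is a ComplexPattern that matches an operand known to be zero-extended from 8 bits (typically an AND with 255), and gi_zexti8 ties it to the selectZExtBits<8> matcher so GlobalISel can import SelectionDAG patterns written in terms of it. A minimal sketch of such a pattern follows; it assumes the usual RISC-V TableGen names (PACKH, GPR, XLenVT) and is not the exact in-tree formulation, but it illustrates the kind of pattern whose import enables the packh selection seen in the test updates below.

    // Sketch only: select (zext8(rs2) << 8) | zext8(rs1) as PACKH once the
    // zexti8 ComplexPattern can be imported by GlobalISel.
    def : Pat<(or (shl (zexti8 (XLenVT GPR:$rs2)), (XLenVT 8)),
                  (zexti8 (XLenVT GPR:$rs1))),
              (PACKH GPR:$rs1, GPR:$rs2)>;
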
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll
index 529e821504405a..da9bffcdb0d582 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll
@@ -113,13 +113,18 @@ define i32 @packh_i32(i32 %a, i32 %b) nounwind {
 }
 
 define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
-; CHECK-LABEL: packh_i32_2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    andi a0, a0, 255
-; CHECK-NEXT:    andi a1, a1, 255
-; CHECK-NEXT:    slli a1, a1, 8
-; CHECK-NEXT:    or a0, a1, a0
-; CHECK-NEXT:    ret
+; RV32I-LABEL: packh_i32_2:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    andi a1, a1, 255
+; RV32I-NEXT:    slli a1, a1, 8
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBKB-LABEL: packh_i32_2:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    packh a0, a0, a1
+; RV32ZBKB-NEXT:    ret
   %and = and i32 %a, 255
   %and1 = and i32 %b, 255
   %shl = shl i32 %and1, 8
@@ -146,16 +151,25 @@ define i64 @packh_i64(i64 %a, i64 %b) nounwind {
 }
 
 define i64 @packh_i64_2(i64 %a, i64 %b) nounwind {
-; CHECK-LABEL: packh_i64_2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    andi a0, a0, 255
-; CHECK-NEXT:    andi a1, a2, 255
-; CHECK-NEXT:    slli a2, a1, 8
-; CHECK-NEXT:    slli a3, zero, 8
-; CHECK-NEXT:    srli a1, a1, 24
-; CHECK-NEXT:    or a1, a3, a1
-; CHECK-NEXT:    or a0, a2, a0
-; CHECK-NEXT:    ret
+; RV32I-LABEL: packh_i64_2:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    andi a1, a2, 255
+; RV32I-NEXT:    slli a2, a1, 8
+; RV32I-NEXT:    slli a3, zero, 8
+; RV32I-NEXT:    srli a1, a1, 24
+; RV32I-NEXT:    or a1, a3, a1
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBKB-LABEL: packh_i64_2:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    andi a1, a2, 255
+; RV32ZBKB-NEXT:    slli a3, zero, 8
+; RV32ZBKB-NEXT:    srli a1, a1, 24
+; RV32ZBKB-NEXT:    or a1, a3, a1
+; RV32ZBKB-NEXT:    packh a0, a0, a2
+; RV32ZBKB-NEXT:    ret
   %and = and i64 %a, 255
   %and1 = and i64 %b, 255
   %shl = shl i64 %and1, 8

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
index 8c5a2ec3dab4fb..abeefb3b1c97fd 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
@@ -230,10 +230,7 @@ define i64 @packh_i64_2(i64 %a, i64 %b) nounwind {
 ;
 ; RV64ZBKB-LABEL: packh_i64_2:
 ; RV64ZBKB:       # %bb.0:
-; RV64ZBKB-NEXT:    andi a0, a0, 255
-; RV64ZBKB-NEXT:    andi a1, a1, 255
-; RV64ZBKB-NEXT:    slli a1, a1, 8
-; RV64ZBKB-NEXT:    or a0, a1, a0
+; RV64ZBKB-NEXT:    packh a0, a0, a1
 ; RV64ZBKB-NEXT:    ret
   %and = and i64 %a, 255
   %and1 = and i64 %b, 255


        

