[llvm] 0a459dd - [RISCV] Add tests for selecting G_BRCOND+G_ICMP. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 13 15:30:40 PST 2023


Author: Craig Topper
Date: 2023-11-13T15:29:34-08:00
New Revision: 0a459dd4e9f0b1009cbcb500d2ab2dce78a90f11

URL: https://github.com/llvm/llvm-project/commit/0a459dd4e9f0b1009cbcb500d2ab2dce78a90f11
DIFF: https://github.com/llvm/llvm-project/commit/0a459dd4e9f0b1009cbcb500d2ab2dce78a90f11.diff

LOG: [RISCV] Add tests for selecting G_BRCOND+G_ICMP. NFC

These should have been part of e0e0891d741588684b0803d7724e5080f9c75537

Added: 
    llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/brcond-rv32.mir
    llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/brcond-rv64.mir

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/brcond-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/brcond-rv32.mir
new file mode 100644
index 000000000000000..13c4d49b3c4039d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/brcond-rv32.mir
@@ -0,0 +1,188 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV32I %s
+
+---
+name:            brcond
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  ; RV32I-LABEL: name: brcond
+  ; RV32I: bb.0:
+  ; RV32I-NEXT:   liveins: $x10, $x11, $x12
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x10
+  ; RV32I-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV32I-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x12
+  ; RV32I-NEXT:   [[LW:%[0-9]+]]:gpr = LW [[COPY1]], 0 :: (volatile load (s32))
+  ; RV32I-NEXT:   BEQ [[LW]], [[COPY]], %bb.14
+  ; RV32I-NEXT:   PseudoBR %bb.1
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT: bb.1:
+  ; RV32I-NEXT:   [[LW1:%[0-9]+]]:gpr = LW [[COPY1]], 0 :: (volatile load (s32))
+  ; RV32I-NEXT:   BNE [[LW1]], [[COPY]], %bb.14
+  ; RV32I-NEXT:   PseudoBR %bb.2
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT: bb.2:
+  ; RV32I-NEXT:   [[LW2:%[0-9]+]]:gpr = LW [[COPY1]], 0 :: (volatile load (s32))
+  ; RV32I-NEXT:   BLT [[LW2]], [[COPY]], %bb.14
+  ; RV32I-NEXT:   PseudoBR %bb.3
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT: bb.3:
+  ; RV32I-NEXT:   [[LW3:%[0-9]+]]:gpr = LW [[COPY1]], 0 :: (volatile load (s32))
+  ; RV32I-NEXT:   BGE [[LW3]], [[COPY]], %bb.14
+  ; RV32I-NEXT:   PseudoBR %bb.4
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT: bb.4:
+  ; RV32I-NEXT:   [[LW4:%[0-9]+]]:gpr = LW [[COPY1]], 0 :: (volatile load (s32))
+  ; RV32I-NEXT:   BLTU [[LW4]], [[COPY]], %bb.14
+  ; RV32I-NEXT:   PseudoBR %bb.5
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT: bb.5:
+  ; RV32I-NEXT:   [[LW5:%[0-9]+]]:gpr = LW [[COPY1]], 0 :: (volatile load (s32))
+  ; RV32I-NEXT:   BGEU [[LW5]], [[COPY]], %bb.14
+  ; RV32I-NEXT:   PseudoBR %bb.6
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT: bb.6:
+  ; RV32I-NEXT:   [[LW6:%[0-9]+]]:gpr = LW [[COPY1]], 0 :: (volatile load (s32))
+  ; RV32I-NEXT:   BLT [[COPY]], [[LW6]], %bb.14
+  ; RV32I-NEXT:   PseudoBR %bb.7
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT: bb.7:
+  ; RV32I-NEXT:   [[LW7:%[0-9]+]]:gpr = LW [[COPY1]], 0 :: (volatile load (s32))
+  ; RV32I-NEXT:   BGE [[COPY]], [[LW7]], %bb.14
+  ; RV32I-NEXT:   PseudoBR %bb.8
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT: bb.8:
+  ; RV32I-NEXT:   [[LW8:%[0-9]+]]:gpr = LW [[COPY1]], 0 :: (volatile load (s32))
+  ; RV32I-NEXT:   BLTU [[COPY]], [[LW8]], %bb.14
+  ; RV32I-NEXT:   PseudoBR %bb.9
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT: bb.9:
+  ; RV32I-NEXT:   [[LW9:%[0-9]+]]:gpr = LW [[COPY1]], 0 :: (volatile load (s32))
+  ; RV32I-NEXT:   BGEU [[COPY]], [[LW9]], %bb.14
+  ; RV32I-NEXT:   PseudoBR %bb.10
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT: bb.10:
+  ; RV32I-NEXT:   [[LW10:%[0-9]+]]:gpr = LW [[COPY1]], 0 :: (volatile load (s32))
+  ; RV32I-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV32I-NEXT:   BNE [[ANDI]], $x0, %bb.14
+  ; RV32I-NEXT:   PseudoBR %bb.11
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT: bb.11:
+  ; RV32I-NEXT:   successors: %bb.14(0x50000000), %bb.12(0x30000000)
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT:   [[LW11:%[0-9]+]]:gpr = LW [[COPY1]], 0 :: (volatile load (s32))
+  ; RV32I-NEXT:   BGE [[LW11]], $x0, %bb.14
+  ; RV32I-NEXT:   PseudoBR %bb.12
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT: bb.12:
+  ; RV32I-NEXT:   successors: %bb.14(0x30000000), %bb.13(0x50000000)
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT:   [[LW12:%[0-9]+]]:gpr = LW [[COPY1]], 0 :: (volatile load (s32))
+  ; RV32I-NEXT:   BGE $x0, [[LW12]], %bb.14
+  ; RV32I-NEXT:   PseudoBR %bb.13
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT: bb.13:
+  ; RV32I-NEXT:   [[LW13:%[0-9]+]]:gpr = LW [[COPY1]], 0 :: (volatile load (s32))
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT: bb.14:
+  ; RV32I-NEXT:   PseudoRET
+  bb.1:
+    liveins: $x10, $x11, $x12
+
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(p0) = COPY $x11
+    %3:gprb(s32) = COPY $x12
+    %26:gprb(s32) = G_CONSTANT i32 -1
+    %29:gprb(s32) = G_CONSTANT i32 1
+    %4:gprb(s32) = G_LOAD %1(p0) :: (volatile load (s32))
+    %56:gprb(s32) = G_ICMP intpred(eq), %4(s32), %0
+    G_BRCOND %56(s32), %bb.15
+    G_BR %bb.2
+
+  bb.2:
+    %6:gprb(s32) = G_LOAD %1(p0) :: (volatile load (s32))
+    %54:gprb(s32) = G_ICMP intpred(ne), %6(s32), %0
+    G_BRCOND %54(s32), %bb.15
+    G_BR %bb.3
+
+  bb.3:
+    %8:gprb(s32) = G_LOAD %1(p0) :: (volatile load (s32))
+    %52:gprb(s32) = G_ICMP intpred(slt), %8(s32), %0
+    G_BRCOND %52(s32), %bb.15
+    G_BR %bb.4
+
+  bb.4:
+    %10:gprb(s32) = G_LOAD %1(p0) :: (volatile load (s32))
+    %50:gprb(s32) = G_ICMP intpred(sge), %10(s32), %0
+    G_BRCOND %50(s32), %bb.15
+    G_BR %bb.5
+
+  bb.5:
+    %12:gprb(s32) = G_LOAD %1(p0) :: (volatile load (s32))
+    %48:gprb(s32) = G_ICMP intpred(ult), %12(s32), %0
+    G_BRCOND %48(s32), %bb.15
+    G_BR %bb.6
+
+  bb.6:
+    %14:gprb(s32) = G_LOAD %1(p0) :: (volatile load (s32))
+    %46:gprb(s32) = G_ICMP intpred(uge), %14(s32), %0
+    G_BRCOND %46(s32), %bb.15
+    G_BR %bb.7
+
+  bb.7:
+    %16:gprb(s32) = G_LOAD %1(p0) :: (volatile load (s32))
+    %44:gprb(s32) = G_ICMP intpred(sgt), %16(s32), %0
+    G_BRCOND %44(s32), %bb.15
+    G_BR %bb.8
+
+  bb.8:
+    %18:gprb(s32) = G_LOAD %1(p0) :: (volatile load (s32))
+    %42:gprb(s32) = G_ICMP intpred(sle), %18(s32), %0
+    G_BRCOND %42(s32), %bb.15
+    G_BR %bb.9
+
+  bb.9:
+    %20:gprb(s32) = G_LOAD %1(p0) :: (volatile load (s32))
+    %40:gprb(s32) = G_ICMP intpred(ugt), %20(s32), %0
+    G_BRCOND %40(s32), %bb.15
+    G_BR %bb.10
+
+  bb.10:
+    %22:gprb(s32) = G_LOAD %1(p0) :: (volatile load (s32))
+    %38:gprb(s32) = G_ICMP intpred(ule), %22(s32), %0
+    G_BRCOND %38(s32), %bb.15
+    G_BR %bb.11
+
+  bb.11:
+    %24:gprb(s32) = G_LOAD %1(p0) :: (volatile load (s32))
+    %57:gprb(s32) = G_CONSTANT i32 1
+    %36:gprb(s32) = G_AND %3, %57
+    G_BRCOND %36(s32), %bb.15
+    G_BR %bb.12
+
+  bb.12:
+    successors: %bb.15(0x50000000), %bb.13(0x30000000)
+
+    %25:gprb(s32) = G_LOAD %1(p0) :: (volatile load (s32))
+    %35:gprb(s32) = G_ICMP intpred(sgt), %25(s32), %26
+    G_BRCOND %35(s32), %bb.15
+    G_BR %bb.13
+
+  bb.13:
+    successors: %bb.15(0x30000000), %bb.14(0x50000000)
+
+    %28:gprb(s32) = G_LOAD %1(p0) :: (volatile load (s32))
+    %33:gprb(s32) = G_ICMP intpred(slt), %28(s32), %29
+    G_BRCOND %33(s32), %bb.15
+    G_BR %bb.14
+
+  bb.14:
+    %31:gprb(s32) = G_LOAD %1(p0) :: (volatile load (s32))
+
+  bb.15:
+    PseudoRET
+
+...

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/brcond-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/brcond-rv64.mir
new file mode 100644
index 000000000000000..de13516e552b128
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/brcond-rv64.mir
@@ -0,0 +1,188 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV64I %s
+
+---
+name:            brcond
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  ; RV64I-LABEL: name: brcond
+  ; RV64I: bb.0:
+  ; RV64I-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x10
+  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64I-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x12
+  ; RV64I-NEXT:   [[LD:%[0-9]+]]:gpr = LD [[COPY1]], 0 :: (volatile load (s64))
+  ; RV64I-NEXT:   BEQ [[LD]], [[COPY]], %bb.14
+  ; RV64I-NEXT:   PseudoBR %bb.1
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT: bb.1:
+  ; RV64I-NEXT:   [[LD1:%[0-9]+]]:gpr = LD [[COPY1]], 0 :: (volatile load (s64))
+  ; RV64I-NEXT:   BNE [[LD1]], [[COPY]], %bb.14
+  ; RV64I-NEXT:   PseudoBR %bb.2
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT: bb.2:
+  ; RV64I-NEXT:   [[LD2:%[0-9]+]]:gpr = LD [[COPY1]], 0 :: (volatile load (s64))
+  ; RV64I-NEXT:   BLT [[LD2]], [[COPY]], %bb.14
+  ; RV64I-NEXT:   PseudoBR %bb.3
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT: bb.3:
+  ; RV64I-NEXT:   [[LD3:%[0-9]+]]:gpr = LD [[COPY1]], 0 :: (volatile load (s64))
+  ; RV64I-NEXT:   BGE [[LD3]], [[COPY]], %bb.14
+  ; RV64I-NEXT:   PseudoBR %bb.4
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT: bb.4:
+  ; RV64I-NEXT:   [[LD4:%[0-9]+]]:gpr = LD [[COPY1]], 0 :: (volatile load (s64))
+  ; RV64I-NEXT:   BLTU [[LD4]], [[COPY]], %bb.14
+  ; RV64I-NEXT:   PseudoBR %bb.5
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT: bb.5:
+  ; RV64I-NEXT:   [[LD5:%[0-9]+]]:gpr = LD [[COPY1]], 0 :: (volatile load (s64))
+  ; RV64I-NEXT:   BGEU [[LD5]], [[COPY]], %bb.14
+  ; RV64I-NEXT:   PseudoBR %bb.6
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT: bb.6:
+  ; RV64I-NEXT:   [[LD6:%[0-9]+]]:gpr = LD [[COPY1]], 0 :: (volatile load (s64))
+  ; RV64I-NEXT:   BLT [[COPY]], [[LD6]], %bb.14
+  ; RV64I-NEXT:   PseudoBR %bb.7
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT: bb.7:
+  ; RV64I-NEXT:   [[LD7:%[0-9]+]]:gpr = LD [[COPY1]], 0 :: (volatile load (s64))
+  ; RV64I-NEXT:   BGE [[COPY]], [[LD7]], %bb.14
+  ; RV64I-NEXT:   PseudoBR %bb.8
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT: bb.8:
+  ; RV64I-NEXT:   [[LD8:%[0-9]+]]:gpr = LD [[COPY1]], 0 :: (volatile load (s64))
+  ; RV64I-NEXT:   BLTU [[COPY]], [[LD8]], %bb.14
+  ; RV64I-NEXT:   PseudoBR %bb.9
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT: bb.9:
+  ; RV64I-NEXT:   [[LD9:%[0-9]+]]:gpr = LD [[COPY1]], 0 :: (volatile load (s64))
+  ; RV64I-NEXT:   BGEU [[COPY]], [[LD9]], %bb.14
+  ; RV64I-NEXT:   PseudoBR %bb.10
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT: bb.10:
+  ; RV64I-NEXT:   [[LD10:%[0-9]+]]:gpr = LD [[COPY1]], 0 :: (volatile load (s64))
+  ; RV64I-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64I-NEXT:   BNE [[ANDI]], $x0, %bb.14
+  ; RV64I-NEXT:   PseudoBR %bb.11
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT: bb.11:
+  ; RV64I-NEXT:   successors: %bb.14(0x50000000), %bb.12(0x30000000)
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT:   [[LD11:%[0-9]+]]:gpr = LD [[COPY1]], 0 :: (volatile load (s64))
+  ; RV64I-NEXT:   BGE [[LD11]], $x0, %bb.14
+  ; RV64I-NEXT:   PseudoBR %bb.12
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT: bb.12:
+  ; RV64I-NEXT:   successors: %bb.14(0x30000000), %bb.13(0x50000000)
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT:   [[LD12:%[0-9]+]]:gpr = LD [[COPY1]], 0 :: (volatile load (s64))
+  ; RV64I-NEXT:   BGE $x0, [[LD12]], %bb.14
+  ; RV64I-NEXT:   PseudoBR %bb.13
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT: bb.13:
+  ; RV64I-NEXT:   [[LD13:%[0-9]+]]:gpr = LD [[COPY1]], 0 :: (volatile load (s64))
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT: bb.14:
+  ; RV64I-NEXT:   PseudoRET
+  bb.1:
+    liveins: $x10, $x11, $x12
+
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(p0) = COPY $x11
+    %3:gprb(s64) = COPY $x12
+    %26:gprb(s64) = G_CONSTANT i64 -1
+    %29:gprb(s64) = G_CONSTANT i64 1
+    %4:gprb(s64) = G_LOAD %1(p0) :: (volatile load (s64))
+    %56:gprb(s64) = G_ICMP intpred(eq), %4(s64), %0
+    G_BRCOND %56(s64), %bb.15
+    G_BR %bb.2
+
+  bb.2:
+    %6:gprb(s64) = G_LOAD %1(p0) :: (volatile load (s64))
+    %54:gprb(s64) = G_ICMP intpred(ne), %6(s64), %0
+    G_BRCOND %54(s64), %bb.15
+    G_BR %bb.3
+
+  bb.3:
+    %8:gprb(s64) = G_LOAD %1(p0) :: (volatile load (s64))
+    %52:gprb(s64) = G_ICMP intpred(slt), %8(s64), %0
+    G_BRCOND %52(s64), %bb.15
+    G_BR %bb.4
+
+  bb.4:
+    %10:gprb(s64) = G_LOAD %1(p0) :: (volatile load (s64))
+    %50:gprb(s64) = G_ICMP intpred(sge), %10(s64), %0
+    G_BRCOND %50(s64), %bb.15
+    G_BR %bb.5
+
+  bb.5:
+    %12:gprb(s64) = G_LOAD %1(p0) :: (volatile load (s64))
+    %48:gprb(s64) = G_ICMP intpred(ult), %12(s64), %0
+    G_BRCOND %48(s64), %bb.15
+    G_BR %bb.6
+
+  bb.6:
+    %14:gprb(s64) = G_LOAD %1(p0) :: (volatile load (s64))
+    %46:gprb(s64) = G_ICMP intpred(uge), %14(s64), %0
+    G_BRCOND %46(s64), %bb.15
+    G_BR %bb.7
+
+  bb.7:
+    %16:gprb(s64) = G_LOAD %1(p0) :: (volatile load (s64))
+    %44:gprb(s64) = G_ICMP intpred(sgt), %16(s64), %0
+    G_BRCOND %44(s64), %bb.15
+    G_BR %bb.8
+
+  bb.8:
+    %18:gprb(s64) = G_LOAD %1(p0) :: (volatile load (s64))
+    %42:gprb(s64) = G_ICMP intpred(sle), %18(s64), %0
+    G_BRCOND %42(s64), %bb.15
+    G_BR %bb.9
+
+  bb.9:
+    %20:gprb(s64) = G_LOAD %1(p0) :: (volatile load (s64))
+    %40:gprb(s64) = G_ICMP intpred(ugt), %20(s64), %0
+    G_BRCOND %40(s64), %bb.15
+    G_BR %bb.10
+
+  bb.10:
+    %22:gprb(s64) = G_LOAD %1(p0) :: (volatile load (s64))
+    %38:gprb(s64) = G_ICMP intpred(ule), %22(s64), %0
+    G_BRCOND %38(s64), %bb.15
+    G_BR %bb.11
+
+  bb.11:
+    %24:gprb(s64) = G_LOAD %1(p0) :: (volatile load (s64))
+    %57:gprb(s64) = G_CONSTANT i64 1
+    %36:gprb(s64) = G_AND %3, %57
+    G_BRCOND %36(s64), %bb.15
+    G_BR %bb.12
+
+  bb.12:
+    successors: %bb.15(0x50000000), %bb.13(0x30000000)
+
+    %25:gprb(s64) = G_LOAD %1(p0) :: (volatile load (s64))
+    %35:gprb(s64) = G_ICMP intpred(sgt), %25(s64), %26
+    G_BRCOND %35(s64), %bb.15
+    G_BR %bb.13
+
+  bb.13:
+    successors: %bb.15(0x30000000), %bb.14(0x50000000)
+
+    %28:gprb(s64) = G_LOAD %1(p0) :: (volatile load (s64))
+    %33:gprb(s64) = G_ICMP intpred(slt), %28(s64), %29
+    G_BRCOND %33(s64), %bb.15
+    G_BR %bb.14
+
+  bb.14:
+    %31:gprb(s64) = G_LOAD %1(p0) :: (volatile load (s64))
+
+  bb.15:
+    PseudoRET
+
+...


        


More information about the llvm-commits mailing list