[llvm] [RISCV] Teach RISCVMakeCompressible to handle byte/half load/store for Zcb. (PR #83375)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 25 21:56:19 PDT 2024


================
@@ -0,0 +1,582 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -o - %s -mtriple=riscv32 -mattr=+zcb -simplify-mir \
+# RUN:   -run-pass=riscv-make-compressible | FileCheck --check-prefixes=CHECK %s
+# RUN: llc -o - %s -mtriple=riscv64 -mattr=+zcb -simplify-mir \
+# RUN:   -run-pass=riscv-make-compressible | FileCheck --check-prefixes=CHECK %s
+
+--- |
+  define void @store_common_value_i8(ptr %a, ptr %b, ptr %c) #0 {
+  entry:
+    store i8 0, ptr %a, align 1
+    store i8 0, ptr %b, align 1
+    store i8 0, ptr %c, align 1
+    ret void
+  }
+
+  define void @store_common_value_i16(ptr %a, ptr %b, ptr %c) #0 {
+  entry:
+    store i16 0, ptr %a, align 2
+    store i16 0, ptr %b, align 2
+    store i16 0, ptr %c, align 2
+    ret void
+  }
+
+  define void @store_common_ptr_i8(ptr %p) #0 {
+  entry:
+    store volatile i8 1, ptr %p, align 1
+    store volatile i8 3, ptr %p, align 1
+    store volatile i8 5, ptr %p, align 1
+    ret void
+  }
+
+  define void @store_common_ptr_i16(ptr %p) #0 {
+  entry:
+    store volatile i16 1, ptr %p, align 2
+    store volatile i16 3, ptr %p, align 2
+    store volatile i16 5, ptr %p, align 2
+    ret void
+  }
+
+  define void @load_common_ptr_i8(ptr %p) #0 {
+  entry:
+    %0 = load volatile i8, ptr %p, align 1
+    %a = sext i8 %0 to i32
+    %1 = load volatile i8, ptr %p, align 1
+    %2 = load volatile i8, ptr %p, align 1
+    ret void
+  }
+
+  define void @load_common_ptr_s16(ptr %p) #0 {
+  entry:
+    %0 = load volatile i16, ptr %p, align 2
+    %1 = load volatile i16, ptr %p, align 2
+    %2 = load volatile i16, ptr %p, align 2
+    ret void
+  }
+
+  define void @load_common_ptr_u16(ptr %p) #0 {
+  entry:
+    %0 = load volatile i16, ptr %p, align 2
+    %1 = load volatile i16, ptr %p, align 2
+    %2 = load volatile i16, ptr %p, align 2
+    ret void
+  }
+
+  define void @store_large_offset_i8(ptr %p) #0 {
+  entry:
+    %0 = getelementptr inbounds i8, ptr %p, i8 100
+    store volatile i8 1, ptr %0, align 1
+    %1 = getelementptr inbounds i8, ptr %p, i8 101
+    store volatile i8 3, ptr %1, align 1
+    %2 = getelementptr inbounds i8, ptr %p, i8 102
+    store volatile i8 5, ptr %2, align 1
+    %3 = getelementptr inbounds i8, ptr %p, i8 103
+    store volatile i8 7, ptr %3, align 1
+    ret void
+  }
+
+  define void @store_large_offset_i16(ptr %p) #0 {
+  entry:
+    %0 = getelementptr inbounds i16, ptr %p, i16 100
+    store volatile i16 1, ptr %0, align 2
+    %1 = getelementptr inbounds i16, ptr %p, i16 100
+    store volatile i16 3, ptr %1, align 2
+    %2 = getelementptr inbounds i16, ptr %p, i16 101
+    store volatile i16 5, ptr %2, align 2
+    %3 = getelementptr inbounds i16, ptr %p, i16 101
+    store volatile i16 7, ptr %3, align 2
+    ret void
+  }
+
+  define void @load_large_offset_i8(ptr %p) #0 {
+  entry:
+    %0 = getelementptr inbounds i8, ptr %p, i8 100
+    %a = load volatile i8, ptr %0
+    %1 = getelementptr inbounds i8, ptr %p, i8 101
+    %b = load volatile i8, ptr %1
+    %2 = getelementptr inbounds i8, ptr %p, i8 102
+    %c = load volatile i8, ptr %2
+    %3 = getelementptr inbounds i8, ptr %p, i8 103
+    %d = load volatile i8, ptr %3
+    ret void
+  }
+
+  define void @load_large_offset_s16(ptr %p) #0 {
+  entry:
+    %0 = getelementptr inbounds i16, ptr %p, i16 100
+    %a = load volatile i16, ptr %0, align 2
+    %1 = getelementptr inbounds i16, ptr %p, i16 100
+    %b = load volatile i16, ptr %1, align 2
+    %2 = getelementptr inbounds i16, ptr %p, i16 101
+    %c = load volatile i16, ptr %2, align 2
+    %3 = getelementptr inbounds i16, ptr %p, i16 101
+    %d = load volatile i16, ptr %3, align 2
+    ret void
+  }
+
+  define void @load_large_offset_u16(ptr %p) #0 {
+  entry:
+    %0 = getelementptr inbounds i16, ptr %p, i16 100
+    %a = load volatile i16, ptr %0, align 2
+    %1 = getelementptr inbounds i16, ptr %p, i16 100
+    %b = load volatile i16, ptr %1, align 2
+    %2 = getelementptr inbounds i16, ptr %p, i16 101
+    %c = load volatile i16, ptr %2, align 2
+    %3 = getelementptr inbounds i16, ptr %p, i16 101
+    %d = load volatile i16, ptr %3, align 2
+    ret void
+  }
+
+  define void @store_large_offset_no_opt_i8(ptr %p) #0 {
+  entry:
+    %0 = getelementptr inbounds i8, ptr %p, i8 100
+    store volatile i8 1, ptr %0, align 1
+    %1 = getelementptr inbounds i8, ptr %p, i8 101
+    store volatile i8 3, ptr %1, align 1
+    %2 = getelementptr inbounds i8, ptr %p, i8 104
+    store volatile i8 5, ptr %2, align 1
+    ret void
+  }
+
+  define void @store_large_offset_no_opt_i16(ptr %p) #0 {
+  entry:
+    %0 = getelementptr inbounds i16, ptr %p, i16 100
+    %a = load volatile i16, ptr %0, align 2
+    %1 = getelementptr inbounds i16, ptr %p, i16 100
+    %b = load volatile i16, ptr %1, align 2
+    %2 = getelementptr inbounds i16, ptr %p, i16 101
+    %c = load volatile i16, ptr %2, align 2
+    %3 = getelementptr inbounds i16, ptr %p, i16 102
+    %d = load volatile i16, ptr %3, align 2
+    ret void
+  }
+
+  define void @load_large_offset_no_opt_i8(ptr %p) #0 {
+  entry:
+    %0 = getelementptr inbounds i8, ptr %p, i8 100
+    %a = load volatile i8, ptr %0
+    %1 = getelementptr inbounds i8, ptr %p, i8 101
+    %b = load volatile i8, ptr %1
+    %2 = getelementptr inbounds i8, ptr %p, i8 103
+    %c = load volatile i8, ptr %2
+    ret void
+  }
+
+  define void @load_large_offset_no_opt_s16(ptr %p) #0 {
+  entry:
+    %0 = getelementptr inbounds i16, ptr %p, i16 100
+    %a = load volatile i16, ptr %0, align 2
+    %1 = getelementptr inbounds i16, ptr %p, i16 101
+    %c = load volatile i16, ptr %1, align 2
+    %2 = getelementptr inbounds i16, ptr %p, i16 102
+    %d = load volatile i16, ptr %2, align 2
+    ret void
+  }
+
+  define void @load_large_offset_no_opt_u16(ptr %p) #0 {
+  entry:
+    %0 = getelementptr inbounds i16, ptr %p, i16 100
+    %a = load volatile i16, ptr %0, align 2
+    %1 = getelementptr inbounds i16, ptr %p, i16 101
+    %c = load volatile i16, ptr %1, align 2
+    %2 = getelementptr inbounds i16, ptr %p, i16 102
+    %d = load volatile i16, ptr %2, align 2
+    ret void
+  }
+  attributes #0 = { minsize }
+
+...
+---
+name:            store_common_value_i8
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12
+
+    ; CHECK-LABEL: name: store_common_value_i8
+    ; CHECK: liveins: $x10, $x11, $x12
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: $x13 = ADDI $x0, 0
+    ; CHECK-NEXT: SB $x13, killed renamable $x10, 0 :: (store (s8) into %ir.a)
+    ; CHECK-NEXT: SB $x13, killed renamable $x11, 0 :: (store (s8) into %ir.b)
+    ; CHECK-NEXT: SB $x13, killed renamable $x12, 0 :: (store (s8) into %ir.c)
+    ; CHECK-NEXT: PseudoRET
+    SB $x0, killed renamable $x10, 0 :: (store (s8) into %ir.a)
+    SB $x0, killed renamable $x11, 0 :: (store (s8) into %ir.b)
+    SB $x0, killed renamable $x12, 0 :: (store (s8) into %ir.c)
+    PseudoRET
+
+...
+---
+name:            store_common_value_i16
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12
+
+    ; CHECK-LABEL: name: store_common_value_i16
+    ; CHECK: liveins: $x10, $x11, $x12
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: $x13 = ADDI $x0, 0
+    ; CHECK-NEXT: SH $x13, killed renamable $x10, 0 :: (store (s16) into %ir.a)
+    ; CHECK-NEXT: SH $x13, killed renamable $x11, 0 :: (store (s16) into %ir.b)
+    ; CHECK-NEXT: SH $x13, killed renamable $x12, 0 :: (store (s16) into %ir.c)
+    ; CHECK-NEXT: PseudoRET
+    SH $x0, killed renamable $x10, 0 :: (store (s16) into %ir.a)
+    SH $x0, killed renamable $x11, 0 :: (store (s16) into %ir.b)
+    SH $x0, killed renamable $x12, 0 :: (store (s16) into %ir.c)
+    PseudoRET
+
+...
+---
+name:            store_common_ptr_i8
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: store_common_ptr_i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: renamable $x11 = ADDI $x0, 1
+    ; CHECK-NEXT: SB killed renamable $x11, renamable $x10, 0 :: (volatile store (s8) into %ir.p)
+    ; CHECK-NEXT: renamable $x11 = ADDI $x0, 3
+    ; CHECK-NEXT: SB killed renamable $x11, renamable $x10, 0 :: (volatile store (s8) into %ir.p)
+    ; CHECK-NEXT: renamable $x11 = ADDI $x0, 5
+    ; CHECK-NEXT: SB killed renamable $x11, killed renamable $x10, 0 :: (volatile store (s8) into %ir.p)
+    ; CHECK-NEXT: PseudoRET
+    renamable $x11 = ADDI $x0, 1
+    SB killed renamable $x11, renamable $x10, 0 :: (volatile store (s8) into %ir.p)
+    renamable $x11 = ADDI $x0, 3
+    SB killed renamable $x11, renamable $x10, 0 :: (volatile store (s8) into %ir.p)
+    renamable $x11 = ADDI $x0, 5
+    SB killed renamable $x11, killed renamable $x10, 0 :: (volatile store (s8) into %ir.p)
+    PseudoRET
+
+...
+---
+name:            store_common_ptr_i16
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: store_common_ptr_i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: renamable $x11 = ADDI $x0, 1
+    ; CHECK-NEXT: SH killed renamable $x11, renamable $x10, 0 :: (volatile store (s16) into %ir.p)
+    ; CHECK-NEXT: renamable $x11 = ADDI $x0, 3
+    ; CHECK-NEXT: SH killed renamable $x11, renamable $x10, 0 :: (volatile store (s16) into %ir.p)
+    ; CHECK-NEXT: renamable $x11 = ADDI $x0, 5
+    ; CHECK-NEXT: SH killed renamable $x11, killed renamable $x10, 0 :: (volatile store (s16) into %ir.p)
+    ; CHECK-NEXT: PseudoRET
+    renamable $x11 = ADDI $x0, 1
+    SH killed renamable $x11, renamable $x10, 0 :: (volatile store (s16) into %ir.p)
+    renamable $x11 = ADDI $x0, 3
+    SH killed renamable $x11, renamable $x10, 0 :: (volatile store (s16) into %ir.p)
+    renamable $x11 = ADDI $x0, 5
+    SH killed renamable $x11, killed renamable $x10, 0 :: (volatile store (s16) into %ir.p)
+    PseudoRET
+
+...
+---
+name:            load_common_ptr_i8
+body:             |
+  bb.0.entry:
+    liveins: $x16
+
+    ; CHECK-LABEL: name: load_common_ptr_i8
+    ; CHECK: liveins: $x16
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: dead $x10 = LB renamable $x16, 0 :: (volatile load (s8) from %ir.p)
+    ; CHECK-NEXT: dead $x10 = LB renamable $x16, 0 :: (volatile load (s8) from %ir.p)
+    ; CHECK-NEXT: dead $x10 = LBU killed renamable $x16, 0 :: (volatile load (s8) from %ir.p)
+    ; CHECK-NEXT: PseudoRET
+    dead $x10 = LB renamable $x16, 0 :: (volatile load (s8) from %ir.p)
----------------
topperc wrote:

I think we need to use LBU for all of the loads here so that the pass actually changes this test.
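
For illustration, a rough sketch of what the input body could look like with only LBU (hypothetical; Zcb provides c.lbu but no compressed form of LB, so only the zero-extending byte loads are compression candidates):

    dead $x10 = LBU renamable $x16, 0 :: (volatile load (s8) from %ir.p)
    dead $x10 = LBU renamable $x16, 0 :: (volatile load (s8) from %ir.p)
    dead $x10 = LBU killed renamable $x16, 0 :: (volatile load (s8) from %ir.p)
    PseudoRET

With all three loads compressible, the pass would presumably copy $x16 into a compressible register (for example $x11 = ADDI $x16, 0) and rewrite the loads to use that base, so the CHECK lines would actually differ from the input.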

https://github.com/llvm/llvm-project/pull/83375

