[llvm] 2d8c159 - [MIRVRegNamer] Avoid opcode hash collision

John Brawn via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 2 06:53:37 PDT 2022


Author: John Brawn
Date: 2022-11-02T13:53:12Z
New Revision: 2d8c1597e51c39d8db1c9428d65e6ef6d6a1d5c1

URL: https://github.com/llvm/llvm-project/commit/2d8c1597e51c39d8db1c9428d65e6ef6d6a1d5c1
DIFF: https://github.com/llvm/llvm-project/commit/2d8c1597e51c39d8db1c9428d65e6ef6d6a1d5c1.diff

LOG: [MIRVRegNamer] Avoid opcode hash collision

D121929 happens to cause CodeGen/MIR/AArch64/mirnamer.mir to fail due
to a hash collision caused by adding two extra opcodes. The collision
is only in the top 19 bits of the hashed opcode so fix this by just
using the whole hash (in fixed width hex for consistency) instead of
the top 5 decimal digits.

Differential Revision: https://reviews.llvm.org/D137155

Added: 
    

Modified: 
    llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
    llvm/test/CodeGen/MIR/AArch64/mir-canon-constant-pool-hash.mir
    llvm/test/CodeGen/MIR/AArch64/mir-canon-jump-table.mir
    llvm/test/CodeGen/MIR/AArch64/mirCanonCopyCopyProp.mir
    llvm/test/CodeGen/MIR/AArch64/mirCanonIdempotent.mir
    llvm/test/CodeGen/MIR/AArch64/mirnamer.mir
    llvm/test/CodeGen/MIR/AMDGPU/mir-canon-multi.mir
    llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir
    llvm/test/CodeGen/MIR/X86/mir-canon-hash-bb.mir
    llvm/test/CodeGen/MIR/X86/mircanon-flags.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp b/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
index a2abe71a6bd7b..35c9aebc119c6 100644
--- a/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
+++ b/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
@@ -62,7 +62,8 @@ std::string VRegRenamer::getInstructionOpcodeHash(MachineInstr &MI) {
                                 /* HashConstantPoolIndices */ true,
                                 /* HashMemOperands */ true);
     assert(Hash && "Expected non-zero Hash");
-    return std::to_string(Hash).substr(0, 5);
+    OS << format_hex_no_prefix(Hash, 16, true);
+    return OS.str();
   }
 
   // Gets a hashable artifact from a given MachineOperand (ie an unsigned).
@@ -132,7 +133,8 @@ std::string VRegRenamer::getInstructionOpcodeHash(MachineInstr &MI) {
   }
 
   auto HashMI = hash_combine_range(MIOperands.begin(), MIOperands.end());
-  return std::to_string(HashMI).substr(0, 5);
+  OS << format_hex_no_prefix(HashMI, 16, true);
+  return OS.str();
 }
 
 unsigned VRegRenamer::createVirtualRegister(unsigned VReg) {

diff --git a/llvm/test/CodeGen/MIR/AArch64/mir-canon-constant-pool-hash.mir b/llvm/test/CodeGen/MIR/AArch64/mir-canon-constant-pool-hash.mir
index 78ed554687fa2..46da2af6bdf04 100644
--- a/llvm/test/CodeGen/MIR/AArch64/mir-canon-constant-pool-hash.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/mir-canon-constant-pool-hash.mir
@@ -14,8 +14,8 @@ constants:
 body: |
   bb.0:
    ; Test that we no longer have hash collisions between two different consts:
-    ;CHECK: %bb{{[0-9]+}}_{{[0-9]+}}__1:gpr64common = ADR
-    ;CHECK: %bb{{[0-9]+}}_{{[0-9]+}}__1:gpr64common = ADR
+    ;CHECK: %bb{{[0-9a-f]+}}_{{[0-9a-f]+}}__1:gpr64common = ADR
+    ;CHECK: %bb{{[0-9a-f]+}}_{{[0-9a-f]+}}__1:gpr64common = ADR
     %vreg0:gpr64common = ADRP target-flags(aarch64-page) %const.0
     %vreg1:gpr64common = ADRP target-flags(aarch64-page) %const.1
 ...

diff --git a/llvm/test/CodeGen/MIR/AArch64/mir-canon-jump-table.mir b/llvm/test/CodeGen/MIR/AArch64/mir-canon-jump-table.mir
index 6d3124c61db1b..a5ffd6e4ce3f9 100644
--- a/llvm/test/CodeGen/MIR/AArch64/mir-canon-jump-table.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/mir-canon-jump-table.mir
@@ -21,10 +21,10 @@ body:             |
   bb.2:
   bb.3:
   bb.7:
-    ;CHECK: %bb{{[0-9]+}}_{{[0-9]+}}__1:_(p0) = G_JUMP_TABLE %jump-table.0
-    ;CHECK: %bb{{[0-9]+}}_{{[0-9]+}}__1:_(p0) = G_JUMP_TABLE %jump-table.1
-    ;CHECK: %bb{{[0-9]+}}_{{[0-9]+}}__1:_(p0) = G_JUMP_TABLE %jump-table.2
-    ;CHECK: %bb{{[0-9]+}}_{{[0-9]+}}__1:_(p0) = G_JUMP_TABLE %jump-table.3
+    ;CHECK: %bb{{[0-9a-f]+}}_{{[0-9a-f]+}}__1:_(p0) = G_JUMP_TABLE %jump-table.0
+    ;CHECK: %bb{{[0-9a-f]+}}_{{[0-9a-f]+}}__1:_(p0) = G_JUMP_TABLE %jump-table.1
+    ;CHECK: %bb{{[0-9a-f]+}}_{{[0-9a-f]+}}__1:_(p0) = G_JUMP_TABLE %jump-table.2
+    ;CHECK: %bb{{[0-9a-f]+}}_{{[0-9a-f]+}}__1:_(p0) = G_JUMP_TABLE %jump-table.3
     %a:_(p0) = G_JUMP_TABLE %jump-table.0
     %b:_(p0) = G_JUMP_TABLE %jump-table.1
     %c:_(p0) = G_JUMP_TABLE %jump-table.2

diff --git a/llvm/test/CodeGen/MIR/AArch64/mirCanonCopyCopyProp.mir b/llvm/test/CodeGen/MIR/AArch64/mirCanonCopyCopyProp.mir
index 21a7dddc98591..daf78187c4849 100644
--- a/llvm/test/CodeGen/MIR/AArch64/mirCanonCopyCopyProp.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/mirCanonCopyCopyProp.mir
@@ -40,7 +40,7 @@ body: |
 
     %42:gpr32 = LDRWui %stack.0, 0 :: (dereferenceable load (s64))
 
-    ;CHECK: %bb0_{{[0-9]+}}__1:gpr32 = LDRWui %stack.0, 0 :: (dereferenceable load (s64))
+    ;CHECK: %bb0_{{[0-9a-f]+}}__1:gpr32 = LDRWui %stack.0, 0 :: (dereferenceable load (s64))
     ;CHECK-NEXT: $w0 = COPY %bb0_
     ;CHECK-NEXT: RET_ReallyLR implicit $w0
 

diff --git a/llvm/test/CodeGen/MIR/AArch64/mirCanonIdempotent.mir b/llvm/test/CodeGen/MIR/AArch64/mirCanonIdempotent.mir
index b30ca7c1c7e3c..63e28498ca532 100644
--- a/llvm/test/CodeGen/MIR/AArch64/mirCanonIdempotent.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/mirCanonIdempotent.mir
@@ -1,12 +1,12 @@
 # RUN: llc -mtriple=arm64-apple-ios11.0.0 -o - -verify-machineinstrs -run-pass mir-canonicalizer %s | FileCheck %s
 # RUN: llc -mtriple=arm64-apple-ios11.0.0 -o - -mir-vreg-namer-use-stable-hash -verify-machineinstrs -run-pass mir-canonicalizer %s | FileCheck %s
 # These Idempotent instructions are sorted alphabetically (based on after the '=')
-# CHECK: %bb0_{{[0-9]+}}__1:gpr64 = MOVi64imm 4617315517961601024
-# CHECK-NEXT: %bb0_{{[0-9]+}}__1:gpr32 = MOVi32imm 408
-# CHECK-NEXT: %bb0_{{[0-9]+}}__2:gpr32 = MOVi32imm 408
-# CHECK-NEXT: %bb0_{{[0-9]+}}__1:gpr64all = IMPLICIT_DEF
-# CHECK-NEXT: %bb0_{{[0-9]+}}__1:fpr64 = FMOVDi 20
-# CHECK-NEXT: %bb0_{{[0-9]+}}__1:fpr64 = FMOVDi 112
+# CHECK: %bb0_{{[0-9a-f]+}}__1:gpr64 = MOVi64imm 4617315517961601024
+# CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:gpr32 = MOVi32imm 408
+# CHECK-NEXT: %bb0_{{[0-9a-f]+}}__2:gpr32 = MOVi32imm 408
+# CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:gpr64all = IMPLICIT_DEF
+# CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:fpr64 = FMOVDi 20
+# CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:fpr64 = FMOVDi 112
 
 ...
 ---

diff --git a/llvm/test/CodeGen/MIR/AArch64/mirnamer.mir b/llvm/test/CodeGen/MIR/AArch64/mirnamer.mir
index cdb2ecca60274..a3b339f07d502 100644
--- a/llvm/test/CodeGen/MIR/AArch64/mirnamer.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/mirnamer.mir
@@ -8,9 +8,9 @@ body:             |
     ;CHECK-LABEL: bb.0
     ;CHECK-NEXT: liveins
     ;CHECK-NEXT: {{  $}}
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__1:_(p0) = COPY $d0
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__1:_(<4 x s32>) = COPY $q0
-    ;CHECK-NEXT: G_STORE %bb0_{{[0-9]+}}__1(<4 x s32>), %bb0_{{[0-9]+}}__1(p0) :: (store (<4 x s32>))
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:_(p0) = COPY $d0
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:_(<4 x s32>) = COPY $q0
+    ;CHECK-NEXT: G_STORE %bb0_{{[0-9a-f]+}}__1(<4 x s32>), %bb0_{{[0-9a-f]+}}__1(p0) :: (store (<4 x s32>))
 
     liveins: $q0, $d0
     %1:fpr(p0) = COPY $d0
@@ -28,19 +28,19 @@ body:             |
   bb.0:
 
     ;CHECK-LABEL: bb.0
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__1:gpr32 = LDRWui
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__1:gpr32 = MOVi32imm 1
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__2:gpr32 = LDRWui
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__1:gpr32 = MOVi32imm 2
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__3:gpr32 = LDRWui
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__1:gpr32 = MOVi32imm 3
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__1:gpr32 = nsw ADDWrr
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__4:gpr32 = LDRWui
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__2:gpr32 = nsw ADDWrr
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__1:gpr32 = MOVi32imm 4
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__3:gpr32 = nsw ADDWrr
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__5:gpr32 = LDRWui
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__1:gpr32 = MOVi32imm 5
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:gpr32 = LDRWui
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:gpr32 = MOVi32imm 1
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__2:gpr32 = LDRWui
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:gpr32 = MOVi32imm 2
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__3:gpr32 = LDRWui
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:gpr32 = MOVi32imm 3
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:gpr32 = nsw ADDWrr
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__4:gpr32 = LDRWui
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__2:gpr32 = nsw ADDWrr
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:gpr32 = MOVi32imm 4
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__3:gpr32 = nsw ADDWrr
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__5:gpr32 = LDRWui
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:gpr32 = MOVi32imm 5
 
     %0:gpr32 = LDRWui %stack.0, 0 :: (dereferenceable load (s64))
     %1:gpr32 = MOVi32imm 1
@@ -78,11 +78,11 @@ body:             |
     ;CHECK-LABEL: bb.0:
     ;CHECK-NEXT: liveins
     ;CHECK-NEXT: {{  $}}
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__1:gpr32 = LDRWui %stack.0, 0
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__1:gpr32 = COPY %bb0_{{[0-9]+}}__1
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__1:gpr32 = COPY %bb0_{{[0-9]+}}__1
-    ;CHECK-NEXT: %bb0_{{[0-9]+}}__2:gpr32 = COPY %bb0_{{[0-9]+}}__1
-    ;CHECK-NEXT: $w0 = COPY %bb0_{{[0-9]+}}__2
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:gpr32 = LDRWui %stack.0, 0
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:gpr32 = COPY %bb0_{{[0-9a-f]+}}__1
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:gpr32 = COPY %bb0_{{[0-9a-f]+}}__1
+    ;CHECK-NEXT: %bb0_{{[0-9a-f]+}}__2:gpr32 = COPY %bb0_{{[0-9a-f]+}}__1
+    ;CHECK-NEXT: $w0 = COPY %bb0_{{[0-9a-f]+}}__2
 
     %0:gpr32 = LDRWui %stack.0, 0 :: (dereferenceable load (s64))
     %1:gpr32 = COPY %0

diff --git a/llvm/test/CodeGen/MIR/AMDGPU/mir-canon-multi.mir b/llvm/test/CodeGen/MIR/AMDGPU/mir-canon-multi.mir
index 785cd20d31968..fb1728d9021b7 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/mir-canon-multi.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/mir-canon-multi.mir
@@ -8,18 +8,18 @@ name: foo
 body:             |
   bb.0:
     ; CHECK-LABEL: name: foo
-    ; CHECK: %bb0_{{[0-9]+}}__1:sreg_32_xm0 = S_MOV_B32 61440
-    ; CHECK: %bb0_{{[0-9]+}}__1:sreg_32_xm0 = S_MOV_B32 0
-    ; CHECK: %bb0_{{[0-9]+}}__1:vgpr_32 = COPY $vgpr0
-    ; CHECK: %bb0_{{[0-9]+}}__1:sgpr_64 = COPY $sgpr0_sgpr1
-    ; CHECK: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %bb0_{{[0-9]+}}__1, 9, 0
-    ; CHECK: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %bb0_{{[0-9]+}}__1, 11, 0
-    ; CHECK: %bb0_{{[0-9]+}}__1:vgpr_32 = COPY %bb0_{{[0-9]+}}__1
-    ; CHECK: %bb0_{{[0-9]+}}__1:vgpr_32 = COPY %bb0_{{[0-9]+}}__1
-    ; CHECK: %bb0_{{[0-9]+}}__2:vgpr_32 = COPY %bb0_{{[0-9]+}}__1
-    ; CHECK: %bb0_{{[0-9]+}}__1:vreg_64 = REG_SEQUENCE %bb0_{{[0-9]+}}__1, %subreg.sub0, %bb0_{{[0-9]+}}__1, %subreg.sub1
-    ; CHECK: %bb0_{{[0-9]+}}__1:sgpr_128 = REG_SEQUENCE %bb0_{{[0-9]+}}__1, %subreg.sub0, %bb0_{{[0-9]+}}__1, %subreg.sub1, %bb0_{{[0-9]+}}__1, %subreg.sub2, %bb0_{{[0-9]+}}__2, %subreg.sub3
-    ; CHECK: BUFFER_STORE_DWORD_ADDR64 %bb0_{{[0-9]+}}__1, %bb0_{{[0-9]+}}__1, %bb0_{{[0-9]+}}__1, 0, 0, 0, 0, 0, implicit $exec
+    ; CHECK: %bb0_{{[0-9a-f]+}}__1:sreg_32_xm0 = S_MOV_B32 61440
+    ; CHECK: %bb0_{{[0-9a-f]+}}__1:sreg_32_xm0 = S_MOV_B32 0
+    ; CHECK: %bb0_{{[0-9a-f]+}}__1:vgpr_32 = COPY $vgpr0
+    ; CHECK: %bb0_{{[0-9a-f]+}}__1:sgpr_64 = COPY $sgpr0_sgpr1
+    ; CHECK: %bb0_{{[0-9a-f]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %bb0_{{[0-9a-f]+}}__1, 9, 0
+    ; CHECK: %bb0_{{[0-9a-f]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %bb0_{{[0-9a-f]+}}__1, 11, 0
+    ; CHECK: %bb0_{{[0-9a-f]+}}__1:vgpr_32 = COPY %bb0_{{[0-9a-f]+}}__1
+    ; CHECK: %bb0_{{[0-9a-f]+}}__1:vgpr_32 = COPY %bb0_{{[0-9a-f]+}}__1
+    ; CHECK: %bb0_{{[0-9a-f]+}}__2:vgpr_32 = COPY %bb0_{{[0-9a-f]+}}__1
+    ; CHECK: %bb0_{{[0-9a-f]+}}__1:vreg_64 = REG_SEQUENCE %bb0_{{[0-9a-f]+}}__1, %subreg.sub0, %bb0_{{[0-9a-f]+}}__1, %subreg.sub1
+    ; CHECK: %bb0_{{[0-9a-f]+}}__1:sgpr_128 = REG_SEQUENCE %bb0_{{[0-9a-f]+}}__1, %subreg.sub0, %bb0_{{[0-9a-f]+}}__1, %subreg.sub1, %bb0_{{[0-9a-f]+}}__1, %subreg.sub2, %bb0_{{[0-9a-f]+}}__2, %subreg.sub3
+    ; CHECK: BUFFER_STORE_DWORD_ADDR64 %bb0_{{[0-9a-f]+}}__1, %bb0_{{[0-9a-f]+}}__1, %bb0_{{[0-9a-f]+}}__1, 0, 0, 0, 0, 0, implicit $exec
     ; CHECK: S_ENDPGM 0
     %10:sreg_32_xm0 = S_MOV_B32 61440
     %11:sreg_32_xm0 = S_MOV_B32 0

diff --git a/llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir b/llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir
index e5d80e9c59fcd..99a905a1a7306 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir
@@ -25,12 +25,12 @@ body:             |
     liveins: $sgpr4_sgpr5
 
     ; CHECK: COPY
-    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
-    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
-    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
-    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
-    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
-    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+    ; CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+    ; CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+    ; CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+    ; CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+    ; CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+    ; CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
 
     %0 = COPY $sgpr4_sgpr5
     %1 = S_LOAD_DWORDX2_IMM %0, 0, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)

diff --git a/llvm/test/CodeGen/MIR/X86/mir-canon-hash-bb.mir b/llvm/test/CodeGen/MIR/X86/mir-canon-hash-bb.mir
index ebd29f917ffb6..32dc9e8752d8a 100644
--- a/llvm/test/CodeGen/MIR/X86/mir-canon-hash-bb.mir
+++ b/llvm/test/CodeGen/MIR/X86/mir-canon-hash-bb.mir
@@ -40,7 +40,7 @@ body:             |
     G_BR %bb.2
 
   ; CHECK: bb.1:
-  ; CHECK: %bb2_{{[0-9]+}}__1:_(s32) = G_CONSTANT
+  ; CHECK: %bb2_{{[0-9a-f]+}}__1:_(s32) = G_CONSTANT
   bb.1:
     %tmp4:_(s32) = G_CONSTANT i32 1
     G_STORE %tmp4(s32), %tmp6(p0) :: (store (s32) into %ir.tmp1)
@@ -48,13 +48,13 @@ body:             |
 
 
   ; CHECK: bb.2:
-  ; CHECK: %bb1_{{[0-9]+}}__1:_(s32) = G_CONSTANT
+  ; CHECK: %bb1_{{[0-9a-f]+}}__1:_(s32) = G_CONSTANT
   bb.2:
     %tmp3:_(s32) = G_CONSTANT i32 2
     G_STORE %tmp3(s32), %tmp6(p0) :: (store (s32) into %ir.tmp1)
 
   ; CHECK: bb.3:
-  ; CHECK: %bb3_{{[0-9]+}}__1:_(s32) =  G_LOAD
+  ; CHECK: %bb3_{{[0-9a-f]+}}__1:_(s32) =  G_LOAD
   bb.3:
     %tmp9:_(s32) = G_LOAD %tmp6(p0) :: (load (s32) from %ir.tmp1)
     $eax = COPY %tmp9(s32)

diff --git a/llvm/test/CodeGen/MIR/X86/mircanon-flags.mir b/llvm/test/CodeGen/MIR/X86/mircanon-flags.mir
index bc5991ea41b5f..6b7b577f8ca54 100644
--- a/llvm/test/CodeGen/MIR/X86/mircanon-flags.mir
+++ b/llvm/test/CodeGen/MIR/X86/mircanon-flags.mir
@@ -12,15 +12,15 @@ body:             |
   bb.0:
 
     ; CHECK: COPY
-    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:fr32 = nnan VMULSSrr
-    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:fr32 = ninf VMULSSrr
-    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:fr32 = nsz VMULSSrr
-    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:fr32 = arcp VMULSSrr
-    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:fr32 = contract VMULSSrr
-    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:fr32 = afn VMULSSrr
-    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:fr32 = reassoc VMULSSrr
-    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:fr32 = nsz arcp contract afn reassoc VMULSSrr
-    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:fr32 = contract afn reassoc VMULSSrr
+    ; CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:fr32 = nnan VMULSSrr
+    ; CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:fr32 = ninf VMULSSrr
+    ; CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:fr32 = nsz VMULSSrr
+    ; CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:fr32 = arcp VMULSSrr
+    ; CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:fr32 = contract VMULSSrr
+    ; CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:fr32 = afn VMULSSrr
+    ; CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:fr32 = reassoc VMULSSrr
+    ; CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:fr32 = nsz arcp contract afn reassoc VMULSSrr
+    ; CHECK-NEXT: %bb0_{{[0-9a-f]+}}__1:fr32 = contract afn reassoc VMULSSrr
 
     %0:fr32 = COPY $xmm0
     %1:fr32 = nnan VMULSSrr %0, %0, implicit $mxcsr


        


More information about the llvm-commits mailing list