[llvm] [CodeGen] Regen some old tests; NFC (PR #91250)
via llvm-commits
llvm-commits at lists.llvm.org
Mon May 6 11:17:23 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-amdgpu
Author: None (goldsteinn)
Changes:
---
Patch is 928.85 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/91250.diff
51 Files Affected:
- (modified) llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll (+8-6)
- (modified) llvm/test/CodeGen/AArch64/arm64-narrow-st-merge.ll (+125)
- (modified) llvm/test/CodeGen/AArch64/bswap-known-bits.ll (+4-4)
- (modified) llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll (+5-5)
- (modified) llvm/test/CodeGen/AArch64/pull-binop-through-shift.ll (+4-4)
- (modified) llvm/test/CodeGen/AArch64/shift-mod.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/vector_splat-const-shift-of-constmasked.ll (+45-45)
- (modified) llvm/test/CodeGen/AMDGPU/build_vector.ll (+373)
- (modified) llvm/test/CodeGen/AMDGPU/fneg.ll (+657)
- (modified) llvm/test/CodeGen/AMDGPU/kernel-argument-dag-lowering.ll (+332)
- (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ds.bpermute.ll (+57)
- (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sbfe.ll (+3)
- (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.ll (+156)
- (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.ll (+156)
- (modified) llvm/test/CodeGen/AMDGPU/llvm.r600.read.local.size.ll (+343)
- (modified) llvm/test/CodeGen/AMDGPU/scratch-simple.ll (+7168)
- (modified) llvm/test/CodeGen/AMDGPU/sext-in-reg.ll (+2253)
- (modified) llvm/test/CodeGen/AMDGPU/shift-i64-opts.ll (+427)
- (modified) llvm/test/CodeGen/AMDGPU/shl-add-to-add-shl.ll (+31)
- (modified) llvm/test/CodeGen/AMDGPU/shl_add_ptr.ll (+51)
- (modified) llvm/test/CodeGen/AMDGPU/store-private.ll (+1733)
- (modified) llvm/test/CodeGen/ARM/Windows/alloca.ll (+3)
- (modified) llvm/test/CodeGen/ARM/Windows/vla.ll (+4)
- (modified) llvm/test/CodeGen/ARM/and-cmpz.ll (+119)
- (modified) llvm/test/CodeGen/ARM/bfx.ll (+32-17)
- (modified) llvm/test/CodeGen/ARM/sbfx.ll (+28-13)
- (modified) llvm/test/CodeGen/ARM/sdiv-pow2-arm-size.ll (+52-32)
- (modified) llvm/test/CodeGen/ARM/shift-combine.ll (+127)
- (modified) llvm/test/CodeGen/BPF/remove_truncate_9.ll (+3)
- (modified) llvm/test/CodeGen/Mips/cins.ll (+33-16)
- (modified) llvm/test/CodeGen/Mips/fabs.ll (+11-6)
- (modified) llvm/test/CodeGen/Mips/fcopysign-f32-f64.ll (+79-28)
- (modified) llvm/test/CodeGen/Mips/fcopysign.ll (+92-31)
- (modified) llvm/test/CodeGen/Mips/llvm-ir/abs.ll (+113-12)
- (modified) llvm/test/CodeGen/NVPTX/lower-byval-args.ll (+219)
- (modified) llvm/test/CodeGen/NVPTX/mulwide.ll (+4-20)
- (modified) llvm/test/CodeGen/NVPTX/unaligned-param-load-store.ll (+3)
- (modified) llvm/test/CodeGen/PowerPC/coalesce-ext.ll (+9-4)
- (modified) llvm/test/CodeGen/PowerPC/extsh.ll (+1)
- (modified) llvm/test/CodeGen/PowerPC/shl_sext.ll (+1)
- (modified) llvm/test/CodeGen/SystemZ/int-abs-01.ll (+37-24)
- (modified) llvm/test/CodeGen/SystemZ/int-cmp-44.ll (+305-161)
- (modified) llvm/test/CodeGen/SystemZ/int-mul-10.ll (+3-38)
- (modified) llvm/test/CodeGen/SystemZ/int-neg-02.ll (+52-34)
- (modified) llvm/test/CodeGen/Thumb2/bfx.ll (+13-6)
- (modified) llvm/test/CodeGen/VE/Scalar/bitreverse.ll (+1)
- (modified) llvm/test/CodeGen/WebAssembly/conv.ll (+3)
- (modified) llvm/test/CodeGen/WebAssembly/simd-sext-inreg.ll (+5)
- (modified) llvm/test/CodeGen/X86/lvi-hardening-loads.ll (+108-88)
- (modified) llvm/test/CodeGen/X86/sext-subreg.ll (+9-4)
- (modified) llvm/test/CodeGen/X86/x86-64-extend-shift.ll (+6-1)
``````````diff
diff --git a/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll b/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll
index d8280dadc550ea..e14618251b6d7d 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc < %s -o - | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64"
@@ -5,13 +6,14 @@ target triple = "arm64-apple-macosx10.9"
; Check that sexts get promoted above adds.
define void @foo(ptr nocapture %a, i32 %i) {
+; CHECK-LABEL: foo:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: add x8, x0, w1, sxtw #2
+; CHECK-NEXT: ldp w9, w10, [x8, #4]
+; CHECK-NEXT: add w9, w10, w9
+; CHECK-NEXT: str w9, [x8]
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: _foo:
-; CHECK: add
-; CHECK-NEXT: ldp
-; CHECK-NEXT: add
-; CHECK-NEXT: str
-; CHECK-NEXT: ret
%add = add nsw i32 %i, 1
%idxprom = sext i32 %add to i64
%arrayidx = getelementptr inbounds i32, ptr %a, i64 %idxprom
diff --git a/llvm/test/CodeGen/AArch64/arm64-narrow-st-merge.ll b/llvm/test/CodeGen/AArch64/arm64-narrow-st-merge.ll
index 81c3195584701c..01ad14b6fba52a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-narrow-st-merge.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-narrow-st-merge.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc < %s -mtriple aarch64 -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple aarch64 -mattr=+strict-align -verify-machineinstrs | FileCheck %s -check-prefix=CHECK-STRICT
@@ -7,6 +8,19 @@
; CHECK-STRICT: strh wzr
; CHECK-STRICT: strh wzr
define void @Strh_zero(ptr nocapture %P, i32 %n) {
+; CHECK-LABEL: Strh_zero:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT: sbfiz x8, x1, #1, #32
+; CHECK-NEXT: str wzr, [x0, x8]
+; CHECK-NEXT: ret
+;
+; CHECK-STRICT-LABEL: Strh_zero:
+; CHECK-STRICT: // %bb.0: // %entry
+; CHECK-STRICT-NEXT: add x8, x0, w1, sxtw #1
+; CHECK-STRICT-NEXT: strh wzr, [x8]
+; CHECK-STRICT-NEXT: strh wzr, [x8, #2]
+; CHECK-STRICT-NEXT: ret
entry:
%idxprom = sext i32 %n to i64
%arrayidx = getelementptr inbounds i16, ptr %P, i64 %idxprom
@@ -26,6 +40,21 @@ entry:
; CHECK-STRICT: strh wzr
; CHECK-STRICT: strh wzr
define void @Strh_zero_4(ptr nocapture %P, i32 %n) {
+; CHECK-LABEL: Strh_zero_4:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT: sbfiz x8, x1, #1, #32
+; CHECK-NEXT: str xzr, [x0, x8]
+; CHECK-NEXT: ret
+;
+; CHECK-STRICT-LABEL: Strh_zero_4:
+; CHECK-STRICT: // %bb.0: // %entry
+; CHECK-STRICT-NEXT: add x8, x0, w1, sxtw #1
+; CHECK-STRICT-NEXT: strh wzr, [x8]
+; CHECK-STRICT-NEXT: strh wzr, [x8, #2]
+; CHECK-STRICT-NEXT: strh wzr, [x8, #4]
+; CHECK-STRICT-NEXT: strh wzr, [x8, #6]
+; CHECK-STRICT-NEXT: ret
entry:
%idxprom = sext i32 %n to i64
%arrayidx = getelementptr inbounds i16, ptr %P, i64 %idxprom
@@ -50,6 +79,18 @@ entry:
; CHECK-STRICT-LABEL: Strw_zero
; CHECK-STRICT: stp wzr, wzr
define void @Strw_zero(ptr nocapture %P, i32 %n) {
+; CHECK-LABEL: Strw_zero:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT: sbfiz x8, x1, #2, #32
+; CHECK-NEXT: str xzr, [x0, x8]
+; CHECK-NEXT: ret
+;
+; CHECK-STRICT-LABEL: Strw_zero:
+; CHECK-STRICT: // %bb.0: // %entry
+; CHECK-STRICT-NEXT: add x8, x0, w1, sxtw #2
+; CHECK-STRICT-NEXT: stp wzr, wzr, [x8]
+; CHECK-STRICT-NEXT: ret
entry:
%idxprom = sext i32 %n to i64
%arrayidx = getelementptr inbounds i32, ptr %P, i64 %idxprom
@@ -64,6 +105,17 @@ entry:
; CHECK-LABEL: Strw_zero_nonzero
; CHECK: stp wzr, w1
define void @Strw_zero_nonzero(ptr nocapture %P, i32 %n) {
+; CHECK-LABEL: Strw_zero_nonzero:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add x8, x0, w1, sxtw #2
+; CHECK-NEXT: stp wzr, w1, [x8]
+; CHECK-NEXT: ret
+;
+; CHECK-STRICT-LABEL: Strw_zero_nonzero:
+; CHECK-STRICT: // %bb.0: // %entry
+; CHECK-STRICT-NEXT: add x8, x0, w1, sxtw #2
+; CHECK-STRICT-NEXT: stp wzr, w1, [x8]
+; CHECK-STRICT-NEXT: ret
entry:
%idxprom = sext i32 %n to i64
%arrayidx = getelementptr inbounds i32, ptr %P, i64 %idxprom
@@ -81,6 +133,18 @@ entry:
; CHECK-STRICT: stp wzr, wzr
; CHECK-STRICT: stp wzr, wzr
define void @Strw_zero_4(ptr nocapture %P, i32 %n) {
+; CHECK-LABEL: Strw_zero_4:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add x8, x0, w1, sxtw #2
+; CHECK-NEXT: stp xzr, xzr, [x8]
+; CHECK-NEXT: ret
+;
+; CHECK-STRICT-LABEL: Strw_zero_4:
+; CHECK-STRICT: // %bb.0: // %entry
+; CHECK-STRICT-NEXT: add x8, x0, w1, sxtw #2
+; CHECK-STRICT-NEXT: stp wzr, wzr, [x8]
+; CHECK-STRICT-NEXT: stp wzr, wzr, [x8, #8]
+; CHECK-STRICT-NEXT: ret
entry:
%idxprom = sext i32 %n to i64
%arrayidx = getelementptr inbounds i32, ptr %P, i64 %idxprom
@@ -106,6 +170,18 @@ entry:
; CHECK-STRICT: sturb wzr
; CHECK-STRICT: sturb wzr
define void @Sturb_zero(ptr nocapture %P, i32 %n) #0 {
+; CHECK-LABEL: Sturb_zero:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add x8, x0, w1, sxtw
+; CHECK-NEXT: sturh wzr, [x8, #-2]
+; CHECK-NEXT: ret
+;
+; CHECK-STRICT-LABEL: Sturb_zero:
+; CHECK-STRICT: // %bb.0: // %entry
+; CHECK-STRICT-NEXT: add x8, x0, w1, sxtw
+; CHECK-STRICT-NEXT: sturb wzr, [x8, #-2]
+; CHECK-STRICT-NEXT: sturb wzr, [x8, #-1]
+; CHECK-STRICT-NEXT: ret
entry:
%sub = add nsw i32 %n, -2
%idxprom = sext i32 %sub to i64
@@ -124,6 +200,18 @@ entry:
; CHECK-STRICT: sturh wzr
; CHECK-STRICT: sturh wzr
define void @Sturh_zero(ptr nocapture %P, i32 %n) {
+; CHECK-LABEL: Sturh_zero:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add x8, x0, w1, sxtw #1
+; CHECK-NEXT: stur wzr, [x8, #-6]
+; CHECK-NEXT: ret
+;
+; CHECK-STRICT-LABEL: Sturh_zero:
+; CHECK-STRICT: // %bb.0: // %entry
+; CHECK-STRICT-NEXT: add x8, x0, w1, sxtw #1
+; CHECK-STRICT-NEXT: sturh wzr, [x8, #-4]
+; CHECK-STRICT-NEXT: sturh wzr, [x8, #-6]
+; CHECK-STRICT-NEXT: ret
entry:
%sub = add nsw i32 %n, -2
%idxprom = sext i32 %sub to i64
@@ -144,6 +232,20 @@ entry:
; CHECK-STRICT: sturh wzr
; CHECK-STRICT: sturh wzr
define void @Sturh_zero_4(ptr nocapture %P, i32 %n) {
+; CHECK-LABEL: Sturh_zero_4:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add x8, x0, w1, sxtw #1
+; CHECK-NEXT: stur xzr, [x8, #-8]
+; CHECK-NEXT: ret
+;
+; CHECK-STRICT-LABEL: Sturh_zero_4:
+; CHECK-STRICT: // %bb.0: // %entry
+; CHECK-STRICT-NEXT: add x8, x0, w1, sxtw #1
+; CHECK-STRICT-NEXT: sturh wzr, [x8, #-6]
+; CHECK-STRICT-NEXT: sturh wzr, [x8, #-8]
+; CHECK-STRICT-NEXT: sturh wzr, [x8, #-4]
+; CHECK-STRICT-NEXT: sturh wzr, [x8, #-2]
+; CHECK-STRICT-NEXT: ret
entry:
%sub = add nsw i32 %n, -3
%idxprom = sext i32 %sub to i64
@@ -169,6 +271,17 @@ entry:
; CHECK-STRICT-LABEL: Sturw_zero
; CHECK-STRICT: stp wzr, wzr
define void @Sturw_zero(ptr nocapture %P, i32 %n) {
+; CHECK-LABEL: Sturw_zero:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add x8, x0, w1, sxtw #2
+; CHECK-NEXT: stur xzr, [x8, #-16]
+; CHECK-NEXT: ret
+;
+; CHECK-STRICT-LABEL: Sturw_zero:
+; CHECK-STRICT: // %bb.0: // %entry
+; CHECK-STRICT-NEXT: add x8, x0, w1, sxtw #2
+; CHECK-STRICT-NEXT: stp wzr, wzr, [x8, #-16]
+; CHECK-STRICT-NEXT: ret
entry:
%sub = add nsw i32 %n, -3
%idxprom = sext i32 %sub to i64
@@ -187,6 +300,18 @@ entry:
; CHECK-STRICT: stp wzr, wzr
; CHECK-STRICT: stp wzr, wzr
define void @Sturw_zero_4(ptr nocapture %P, i32 %n) {
+; CHECK-LABEL: Sturw_zero_4:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add x8, x0, w1, sxtw #2
+; CHECK-NEXT: stp xzr, xzr, [x8, #-16]
+; CHECK-NEXT: ret
+;
+; CHECK-STRICT-LABEL: Sturw_zero_4:
+; CHECK-STRICT: // %bb.0: // %entry
+; CHECK-STRICT-NEXT: add x8, x0, w1, sxtw #2
+; CHECK-STRICT-NEXT: stp wzr, wzr, [x8, #-16]
+; CHECK-STRICT-NEXT: stp wzr, wzr, [x8, #-8]
+; CHECK-STRICT-NEXT: ret
entry:
%sub = add nsw i32 %n, -3
%idxprom = sext i32 %sub to i64
diff --git a/llvm/test/CodeGen/AArch64/bswap-known-bits.ll b/llvm/test/CodeGen/AArch64/bswap-known-bits.ll
index 23619e47367d01..f13ef52f94a414 100644
--- a/llvm/test/CodeGen/AArch64/bswap-known-bits.ll
+++ b/llvm/test/CodeGen/AArch64/bswap-known-bits.ll
@@ -8,7 +8,7 @@ declare i64 @llvm.bswap.i64(i64)
define i1 @test1(i16 %arg) {
; CHECK-LABEL: test1:
; CHECK: ; %bb.0:
-; CHECK-NEXT: mov w0, #1
+; CHECK-NEXT: mov w0, #1 ; =0x1
; CHECK-NEXT: ret
%a = or i16 %arg, 511
%b = call i16 @llvm.bswap.i16(i16 %a)
@@ -20,7 +20,7 @@ define i1 @test1(i16 %arg) {
define i1 @test2(i16 %arg) {
; CHECK-LABEL: test2:
; CHECK: ; %bb.0:
-; CHECK-NEXT: mov w0, #1
+; CHECK-NEXT: mov w0, #1 ; =0x1
; CHECK-NEXT: ret
%a = or i16 %arg, 1
%b = call i16 @llvm.bswap.i16(i16 %a)
@@ -32,7 +32,7 @@ define i1 @test2(i16 %arg) {
define i1 @test3(i16 %arg) {
; CHECK-LABEL: test3:
; CHECK: ; %bb.0:
-; CHECK-NEXT: mov w0, #1
+; CHECK-NEXT: mov w0, #1 ; =0x1
; CHECK-NEXT: ret
%a = or i16 %arg, 256
%b = call i16 @llvm.bswap.i16(i16 %a)
@@ -44,7 +44,7 @@ define i1 @test3(i16 %arg) {
define i1 @test4(i32 %arg) {
; CHECK-LABEL: test4:
; CHECK: ; %bb.0:
-; CHECK-NEXT: mov w0, #1
+; CHECK-NEXT: mov w0, #1 ; =0x1
; CHECK-NEXT: ret
%a = or i32 %arg, 2147483647 ; i32_MAX
%b = call i32 @llvm.bswap.i32(i32 %a)
diff --git a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
index 32a62453202f40..60ceaf19731921 100644
--- a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
@@ -289,7 +289,7 @@ define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
define i1 @scalar_i32_x_is_const_eq(i32 %y) nounwind {
; CHECK-LABEL: scalar_i32_x_is_const_eq:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #43605
+; CHECK-NEXT: mov w8, #43605 // =0xaa55
; CHECK-NEXT: movk w8, #43605, lsl #16
; CHECK-NEXT: lsl w8, w8, w0
; CHECK-NEXT: tst w8, #0x1
@@ -303,8 +303,8 @@ define i1 @scalar_i32_x_is_const_eq(i32 %y) nounwind {
define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
; CHECK-LABEL: scalar_i32_x_is_const2_eq:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #1
-; CHECK-NEXT: mov w9, #43605
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: mov w9, #43605 // =0xaa55
; CHECK-NEXT: lsl w8, w8, w0
; CHECK-NEXT: movk w9, #43605, lsl #16
; CHECK-NEXT: tst w8, w9
@@ -319,7 +319,7 @@ define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
define i1 @scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
; CHECK-LABEL: scalar_i8_bitsinmiddle_slt:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #24
+; CHECK-NEXT: mov w8, #24 // =0x18
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsl w8, w8, w1
; CHECK-NEXT: and w8, w8, w0
@@ -334,7 +334,7 @@ define i1 @scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
; CHECK-LABEL: scalar_i8_signbit_eq_with_nonzero:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #-128
+; CHECK-NEXT: mov w8, #-128 // =0xffffff80
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsl w8, w8, w1
; CHECK-NEXT: and w8, w8, w0
diff --git a/llvm/test/CodeGen/AArch64/pull-binop-through-shift.ll b/llvm/test/CodeGen/AArch64/pull-binop-through-shift.ll
index b3fbe8bdb6e308..a892bb85692d3e 100644
--- a/llvm/test/CodeGen/AArch64/pull-binop-through-shift.ll
+++ b/llvm/test/CodeGen/AArch64/pull-binop-through-shift.ll
@@ -81,7 +81,7 @@ define i32 @xor_nosignbit_shl(i32 %x, ptr %dst) {
define i32 @add_signbit_shl(i32 %x, ptr %dst) {
; CHECK-LABEL: add_signbit_shl:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #-16777216
+; CHECK-NEXT: mov w8, #-16777216 // =0xff000000
; CHECK-NEXT: add w0, w8, w0, lsl #8
; CHECK-NEXT: str w0, [x1]
; CHECK-NEXT: ret
@@ -93,7 +93,7 @@ define i32 @add_signbit_shl(i32 %x, ptr %dst) {
define i32 @add_nosignbit_shl(i32 %x, ptr %dst) {
; CHECK-LABEL: add_nosignbit_shl:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #-16777216
+; CHECK-NEXT: mov w8, #-16777216 // =0xff000000
; CHECK-NEXT: add w0, w8, w0, lsl #8
; CHECK-NEXT: str w0, [x1]
; CHECK-NEXT: ret
@@ -195,7 +195,7 @@ define i32 @add_signbit_lshr(i32 %x, ptr %dst) {
define i32 @add_nosignbit_lshr(i32 %x, ptr %dst) {
; CHECK-LABEL: add_nosignbit_lshr:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2147418112
+; CHECK-NEXT: mov w8, #2147418112 // =0x7fff0000
; CHECK-NEXT: add w8, w0, w8
; CHECK-NEXT: lsr w0, w8, #8
; CHECK-NEXT: str w0, [x1]
@@ -298,7 +298,7 @@ define i32 @add_signbit_ashr(i32 %x, ptr %dst) {
define i32 @add_nosignbit_ashr(i32 %x, ptr %dst) {
; CHECK-LABEL: add_nosignbit_ashr:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2147418112
+; CHECK-NEXT: mov w8, #2147418112 // =0x7fff0000
; CHECK-NEXT: add w8, w0, w8
; CHECK-NEXT: asr w0, w8, #8
; CHECK-NEXT: str w0, [x1]
diff --git a/llvm/test/CodeGen/AArch64/shift-mod.ll b/llvm/test/CodeGen/AArch64/shift-mod.ll
index a90603195cf348..ac95b75168ed98 100644
--- a/llvm/test/CodeGen/AArch64/shift-mod.ll
+++ b/llvm/test/CodeGen/AArch64/shift-mod.ll
@@ -127,7 +127,7 @@ define i64 @ashr_add_shl_i36(i64 %r) {
define i64 @ashr_add_shl_mismatch_shifts1(i64 %r) {
; CHECK-LABEL: ashr_add_shl_mismatch_shifts1:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov x8, #4294967296
+; CHECK-NEXT: mov x8, #4294967296 // =0x100000000
; CHECK-NEXT: add x8, x8, x0, lsl #8
; CHECK-NEXT: asr x0, x8, #32
; CHECK-NEXT: ret
@@ -140,7 +140,7 @@ define i64 @ashr_add_shl_mismatch_shifts1(i64 %r) {
define i64 @ashr_add_shl_mismatch_shifts2(i64 %r) {
; CHECK-LABEL: ashr_add_shl_mismatch_shifts2:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov x8, #4294967296
+; CHECK-NEXT: mov x8, #4294967296 // =0x100000000
; CHECK-NEXT: add x8, x8, x0, lsr #8
; CHECK-NEXT: lsr x0, x8, #8
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/vector_splat-const-shift-of-constmasked.ll b/llvm/test/CodeGen/AArch64/vector_splat-const-shift-of-constmasked.ll
index 7e958b266846a1..6525d6cd7458b5 100644
--- a/llvm/test/CodeGen/AArch64/vector_splat-const-shift-of-constmasked.ll
+++ b/llvm/test/CodeGen/AArch64/vector_splat-const-shift-of-constmasked.ll
@@ -328,7 +328,7 @@ define <8 x i16> @test_128_i16_x_8_127_mask_lshr_1(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_3(<8 x i16> %a0) {
; CHECK-LABEL: test_128_i16_x_8_2032_mask_lshr_3:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2032
+; CHECK-NEXT: mov w8, #2032 // =0x7f0
; CHECK-NEXT: dup v1.8h, w8
; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ushr v0.8h, v0.8h, #3
@@ -340,7 +340,7 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_3(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_4(<8 x i16> %a0) {
; CHECK-LABEL: test_128_i16_x_8_2032_mask_lshr_4:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2032
+; CHECK-NEXT: mov w8, #2032 // =0x7f0
; CHECK-NEXT: dup v1.8h, w8
; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ushr v0.8h, v0.8h, #4
@@ -352,7 +352,7 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_4(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_5(<8 x i16> %a0) {
; CHECK-LABEL: test_128_i16_x_8_2032_mask_lshr_5:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2032
+; CHECK-NEXT: mov w8, #2032 // =0x7f0
; CHECK-NEXT: dup v1.8h, w8
; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ushr v0.8h, v0.8h, #5
@@ -364,7 +364,7 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_5(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_6(<8 x i16> %a0) {
; CHECK-LABEL: test_128_i16_x_8_2032_mask_lshr_6:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2032
+; CHECK-NEXT: mov w8, #2032 // =0x7f0
; CHECK-NEXT: dup v1.8h, w8
; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ushr v0.8h, v0.8h, #6
@@ -432,7 +432,7 @@ define <8 x i16> @test_128_i16_x_8_127_mask_ashr_1(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_3(<8 x i16> %a0) {
; CHECK-LABEL: test_128_i16_x_8_2032_mask_ashr_3:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2032
+; CHECK-NEXT: mov w8, #2032 // =0x7f0
; CHECK-NEXT: dup v1.8h, w8
; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ushr v0.8h, v0.8h, #3
@@ -444,7 +444,7 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_3(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_4(<8 x i16> %a0) {
; CHECK-LABEL: test_128_i16_x_8_2032_mask_ashr_4:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2032
+; CHECK-NEXT: mov w8, #2032 // =0x7f0
; CHECK-NEXT: dup v1.8h, w8
; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ushr v0.8h, v0.8h, #4
@@ -456,7 +456,7 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_4(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_5(<8 x i16> %a0) {
; CHECK-LABEL: test_128_i16_x_8_2032_mask_ashr_5:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2032
+; CHECK-NEXT: mov w8, #2032 // =0x7f0
; CHECK-NEXT: dup v1.8h, w8
; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ushr v0.8h, v0.8h, #5
@@ -468,7 +468,7 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_5(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_6(<8 x i16> %a0) {
; CHECK-LABEL: test_128_i16_x_8_2032_mask_ashr_6:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2032
+; CHECK-NEXT: mov w8, #2032 // =0x7f0
; CHECK-NEXT: dup v1.8h, w8
; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ushr v0.8h, v0.8h, #6
@@ -565,7 +565,7 @@ define <8 x i16> @test_128_i16_x_8_127_mask_shl_10(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_shl_3(<8 x i16> %a0) {
; CHECK-LABEL: test_128_i16_x_8_2032_mask_shl_3:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2032
+; CHECK-NEXT: mov w8, #2032 // =0x7f0
; CHECK-NEXT: dup v1.8h, w8
; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: shl v0.8h, v0.8h, #3
@@ -577,7 +577,7 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_3(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_shl_4(<8 x i16> %a0) {
; CHECK-LABEL: test_128_i16_x_8_2032_mask_shl_4:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2032
+; CHECK-NEXT: mov w8, #2032 // =0x7f0
; CHECK-NEXT: dup v1.8h, w8
; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: shl v0.8h, v0.8h, #4
@@ -589,7 +589,7 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_4(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_shl_5(<8 x i16> %a0) {
; CHECK-LABEL: test_128_i16_x_8_2032_mask_shl_5:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2032
+; CHECK-NEXT: mov w8, #2032 // =0x7f0
; CHECK-NEXT: dup v1.8h, w8
; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: shl v0.8h, v0.8h, #5
@@ -601,7 +601,7 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_5(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_shl_6(<8 x i16> %a0) {
; CHECK-LABEL: test_128_i16_x_8_2032_mask_shl_6:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2032
+; CHECK-NEXT: mov w8, #2032 // =0x7f0
; CHECK-NEXT: dup v1.8h, w8
; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: shl v0.8h, v0.8h, #6
@@ -644,7 +644,7 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_lshr_1(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_7(<4 x i32> %a0) {
; CHECK-LABEL: test_128_i32_x_4_8388352_mask_lshr_7:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #8388352
+; CHECK-NEXT: mov w8, #8388352 // =0x7fff00
; CHECK-NEXT: dup v1.4s, w8
; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ushr v0.4s, v0.4s, #7...
[truncated]
``````````
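For reference, checks like the ones in this patch are regenerated with utils/update_llc_test_checks.py, which rewrites the `; CHECK` lines from current llc output and records its options in each test's `NOTE: ... UTC_ARGS:` line so later runs reuse them. Below is a minimal sketch of such a regeneration step; the build directory, the `--llc-binary` flag value, and the two test paths are illustrative assumptions, not taken from this PR.

```python
# Sketch: regenerate FileCheck assertions for a couple of the touched tests.
# Run from an llvm-project checkout with a built llc (paths are assumptions).
import subprocess

tests = [
    "llvm/test/CodeGen/AArch64/arm64-narrow-st-merge.ll",
    "llvm/test/CodeGen/AArch64/bswap-known-bits.ll",
]

subprocess.run(
    ["python3", "llvm/utils/update_llc_test_checks.py",
     "--llc-binary", "build/bin/llc",  # point at your locally built llc
     *tests],
    check=True,  # fail loudly if the script reports an error
)
```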
https://github.com/llvm/llvm-project/pull/91250