[llvm] b3df889 - AMDGPU: Convert test to generated checks

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 28 07:35:37 PST 2022


Author: Matt Arsenault
Date: 2022-11-28T10:35:29-05:00
New Revision: b3df889b714206094d17b359e575de1eb09efe63

URL: https://github.com/llvm/llvm-project/commit/b3df889b714206094d17b359e575de1eb09efe63
DIFF: https://github.com/llvm/llvm-project/commit/b3df889b714206094d17b359e575de1eb09efe63.diff

LOG: AMDGPU: Convert test to generated checks

These checks were too thin to begin with, and they would have required
slightly trickier updates for the opaque pointer migration.

Added: 
    

Modified: 
    llvm/test/CodeGen/AMDGPU/widen_extending_scalar_loads.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/widen_extending_scalar_loads.ll b/llvm/test/CodeGen/AMDGPU/widen_extending_scalar_loads.ll
index 92df3402edf6..f3532f8d0b85 100644
--- a/llvm/test/CodeGen/AMDGPU/widen_extending_scalar_loads.ll
+++ b/llvm/test/CodeGen/AMDGPU/widen_extending_scalar_loads.ll
@@ -1,187 +1,224 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -mtriple=amdgcn-- -amdgpu-codegenprepare -amdgpu-codegenprepare-widen-constant-loads < %s | FileCheck -check-prefix=OPT %s
 
 declare i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr() #0
 
-; OPT-LABEL: @constant_load_i1
-; OPT: load i1
-; OPT-NEXT: store i1
 define amdgpu_kernel void @constant_load_i1(i1 addrspace(1)* %out, i1 addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_i1(
+; OPT-NEXT:    [[VAL:%.*]] = load i1, i1 addrspace(4)* [[IN:%.*]], align 1
+; OPT-NEXT:    store i1 [[VAL]], i1 addrspace(1)* [[OUT:%.*]], align 1
+; OPT-NEXT:    ret void
+;
   %val = load i1, i1 addrspace(4)* %in
   store i1 %val, i1 addrspace(1)* %out
   ret void
 }
 
-; OPT-LABEL: @constant_load_i1_align2
-; OPT: load i1
-; OPT-NEXT: store
 define amdgpu_kernel void @constant_load_i1_align2(i1 addrspace(1)* %out, i1 addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_i1_align2(
+; OPT-NEXT:    [[VAL:%.*]] = load i1, i1 addrspace(4)* [[IN:%.*]], align 2
+; OPT-NEXT:    store i1 [[VAL]], i1 addrspace(1)* [[OUT:%.*]], align 2
+; OPT-NEXT:    ret void
+;
   %val = load i1, i1 addrspace(4)* %in, align 2
   store i1 %val, i1 addrspace(1)* %out, align 2
   ret void
 }
 
-; OPT-LABEL: @constant_load_i1_align4
-; OPT: bitcast
-; OPT-NEXT: load i32
-; OPT-NEXT: trunc
-; OPT-NEXT: store
 define amdgpu_kernel void @constant_load_i1_align4(i1 addrspace(1)* %out, i1 addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_i1_align4(
+; OPT-NEXT:    [[TMP1:%.*]] = bitcast i1 addrspace(4)* [[IN:%.*]] to i32 addrspace(4)*
+; OPT-NEXT:    [[TMP2:%.*]] = load i32, i32 addrspace(4)* [[TMP1]], align 4
+; OPT-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i1
+; OPT-NEXT:    store i1 [[TMP3]], i1 addrspace(1)* [[OUT:%.*]], align 4
+; OPT-NEXT:    ret void
+;
   %val = load i1, i1 addrspace(4)* %in, align 4
   store i1 %val, i1 addrspace(1)* %out, align 4
   ret void
 }
 
-; OPT-LABEL: @constant_load_i8
-; OPT: load i8
-; OPT-NEXT: store
 define amdgpu_kernel void @constant_load_i8(i8 addrspace(1)* %out, i8 addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_i8(
+; OPT-NEXT:    [[VAL:%.*]] = load i8, i8 addrspace(4)* [[IN:%.*]], align 1
+; OPT-NEXT:    store i8 [[VAL]], i8 addrspace(1)* [[OUT:%.*]], align 1
+; OPT-NEXT:    ret void
+;
   %val = load i8, i8 addrspace(4)* %in
   store i8 %val, i8 addrspace(1)* %out
   ret void
 }
 
-; OPT-LABEL: @constant_load_i8_align2
-; OPT: load i8
-; OPT-NEXT: store
 define amdgpu_kernel void @constant_load_i8_align2(i8 addrspace(1)* %out, i8 addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_i8_align2(
+; OPT-NEXT:    [[VAL:%.*]] = load i8, i8 addrspace(4)* [[IN:%.*]], align 2
+; OPT-NEXT:    store i8 [[VAL]], i8 addrspace(1)* [[OUT:%.*]], align 2
+; OPT-NEXT:    ret void
+;
   %val = load i8, i8 addrspace(4)* %in, align 2
   store i8 %val, i8 addrspace(1)* %out, align 2
   ret void
 }
 
-; OPT-LABEL: @constant_load_i8align4
-; OPT: bitcast
-; OPT-NEXT: load i32
-; OPT-NEXT: trunc
-; OPT-NEXT: store
 define amdgpu_kernel void @constant_load_i8align4(i8 addrspace(1)* %out, i8 addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_i8align4(
+; OPT-NEXT:    [[TMP1:%.*]] = bitcast i8 addrspace(4)* [[IN:%.*]] to i32 addrspace(4)*
+; OPT-NEXT:    [[TMP2:%.*]] = load i32, i32 addrspace(4)* [[TMP1]], align 4
+; OPT-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
+; OPT-NEXT:    store i8 [[TMP3]], i8 addrspace(1)* [[OUT:%.*]], align 4
+; OPT-NEXT:    ret void
+;
   %val = load i8, i8 addrspace(4)* %in, align 4
   store i8 %val, i8 addrspace(1)* %out, align 4
   ret void
 }
 
-
-; OPT-LABEL: @constant_load_v2i8
-; OPT: load <2 x i8>
-; OPT-NEXT: store
 define amdgpu_kernel void @constant_load_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_v2i8(
+; OPT-NEXT:    [[LD:%.*]] = load <2 x i8>, <2 x i8> addrspace(4)* [[IN:%.*]], align 2
+; OPT-NEXT:    store <2 x i8> [[LD]], <2 x i8> addrspace(1)* [[OUT:%.*]], align 2
+; OPT-NEXT:    ret void
+;
   %ld = load <2 x i8>, <2 x i8> addrspace(4)* %in
   store <2 x i8> %ld, <2 x i8> addrspace(1)* %out
   ret void
 }
 
-; OPT-LABEL: @constant_load_v2i8_align4
-; OPT: bitcast
-; OPT-NEXT: load i32
-; OPT-NEXT: trunc
-; OPT-NEXT: bitcast
-; OPT-NEXT: store
 define amdgpu_kernel void @constant_load_v2i8_align4(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_v2i8_align4(
+; OPT-NEXT:    [[TMP1:%.*]] = bitcast <2 x i8> addrspace(4)* [[IN:%.*]] to i32 addrspace(4)*
+; OPT-NEXT:    [[TMP2:%.*]] = load i32, i32 addrspace(4)* [[TMP1]], align 4
+; OPT-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i16
+; OPT-NEXT:    [[TMP4:%.*]] = bitcast i16 [[TMP3]] to <2 x i8>
+; OPT-NEXT:    store <2 x i8> [[TMP4]], <2 x i8> addrspace(1)* [[OUT:%.*]], align 4
+; OPT-NEXT:    ret void
+;
   %ld = load <2 x i8>, <2 x i8> addrspace(4)* %in, align 4
   store <2 x i8> %ld, <2 x i8> addrspace(1)* %out, align 4
   ret void
 }
 
-; OPT-LABEL: @constant_load_v3i8
-; OPT: bitcast <3 x i8>
-; OPT-NEXT: load i32, i32 addrspace(4)
-; OPT-NEXT: trunc i32
-; OPT-NEXT: bitcast i24
-; OPT-NEXT: store <3 x i8>
 define amdgpu_kernel void @constant_load_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_v3i8(
+; OPT-NEXT:    [[TMP1:%.*]] = bitcast <3 x i8> addrspace(4)* [[IN:%.*]] to i32 addrspace(4)*
+; OPT-NEXT:    [[TMP2:%.*]] = load i32, i32 addrspace(4)* [[TMP1]], align 4
+; OPT-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i24
+; OPT-NEXT:    [[TMP4:%.*]] = bitcast i24 [[TMP3]] to <3 x i8>
+; OPT-NEXT:    store <3 x i8> [[TMP4]], <3 x i8> addrspace(1)* [[OUT:%.*]], align 4
+; OPT-NEXT:    ret void
+;
   %ld = load <3 x i8>, <3 x i8> addrspace(4)* %in
   store <3 x i8> %ld, <3 x i8> addrspace(1)* %out
   ret void
 }
 
-; OPT-LABEL: @constant_load_v3i8_align4
-; OPT: bitcast <3 x i8>
-; OPT-NEXT: load i32, i32 addrspace(4)
-; OPT-NEXT: trunc i32
-; OPT-NEXT: bitcast i24
-; OPT-NEXT: store <3 x i8>
 define amdgpu_kernel void @constant_load_v3i8_align4(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_v3i8_align4(
+; OPT-NEXT:    [[TMP1:%.*]] = bitcast <3 x i8> addrspace(4)* [[IN:%.*]] to i32 addrspace(4)*
+; OPT-NEXT:    [[TMP2:%.*]] = load i32, i32 addrspace(4)* [[TMP1]], align 4
+; OPT-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i24
+; OPT-NEXT:    [[TMP4:%.*]] = bitcast i24 [[TMP3]] to <3 x i8>
+; OPT-NEXT:    store <3 x i8> [[TMP4]], <3 x i8> addrspace(1)* [[OUT:%.*]], align 4
+; OPT-NEXT:    ret void
+;
   %ld = load <3 x i8>, <3 x i8> addrspace(4)* %in, align 4
   store <3 x i8> %ld, <3 x i8> addrspace(1)* %out, align 4
   ret void
 }
 
-; OPT-LABEL: @constant_load_i16
-; OPT: load i16
-; OPT: sext
-; OPT-NEXT: store
 define amdgpu_kernel void @constant_load_i16(i32 addrspace(1)* %out, i16 addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_i16(
+; OPT-NEXT:    [[LD:%.*]] = load i16, i16 addrspace(4)* [[IN:%.*]], align 2
+; OPT-NEXT:    [[EXT:%.*]] = sext i16 [[LD]] to i32
+; OPT-NEXT:    store i32 [[EXT]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; OPT-NEXT:    ret void
+;
   %ld = load i16, i16 addrspace(4)* %in
   %ext = sext i16 %ld to i32
   store i32 %ext, i32 addrspace(1)* %out
   ret void
 }
 
-; OPT-LABEL: @constant_load_i16_align4
-; OPT: bitcast
-; OPT-NEXT: load i32
-; OPT-NEXT: trunc
-; OPT-NEXT: sext
-; OPT-NEXT: store
 define amdgpu_kernel void @constant_load_i16_align4(i32 addrspace(1)* %out, i16 addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_i16_align4(
+; OPT-NEXT:    [[TMP1:%.*]] = bitcast i16 addrspace(4)* [[IN:%.*]] to i32 addrspace(4)*
+; OPT-NEXT:    [[TMP2:%.*]] = load i32, i32 addrspace(4)* [[TMP1]], align 4
+; OPT-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i16
+; OPT-NEXT:    [[EXT:%.*]] = sext i16 [[TMP3]] to i32
+; OPT-NEXT:    store i32 [[EXT]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; OPT-NEXT:    ret void
+;
   %ld = load i16, i16 addrspace(4)* %in, align 4
   %ext = sext i16 %ld to i32
   store i32 %ext, i32 addrspace(1)* %out, align 4
   ret void
 }
 
-; OPT-LABEL: @constant_load_f16
-; OPT: load half
-; OPT-NEXT: store
 define amdgpu_kernel void @constant_load_f16(half addrspace(1)* %out, half addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_f16(
+; OPT-NEXT:    [[LD:%.*]] = load half, half addrspace(4)* [[IN:%.*]], align 2
+; OPT-NEXT:    store half [[LD]], half addrspace(1)* [[OUT:%.*]], align 2
+; OPT-NEXT:    ret void
+;
   %ld = load half, half addrspace(4)* %in
   store half %ld, half addrspace(1)* %out
   ret void
 }
 
-; OPT-LABEL: @constant_load_v2f16
-; OPT: load <2 x half>
-; OPT-NEXT: store
 define amdgpu_kernel void @constant_load_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_v2f16(
+; OPT-NEXT:    [[LD:%.*]] = load <2 x half>, <2 x half> addrspace(4)* [[IN:%.*]], align 4
+; OPT-NEXT:    store <2 x half> [[LD]], <2 x half> addrspace(1)* [[OUT:%.*]], align 4
+; OPT-NEXT:    ret void
+;
   %ld = load <2 x half>, <2 x half> addrspace(4)* %in
   store <2 x half> %ld, <2 x half> addrspace(1)* %out
   ret void
 }
 
-; OPT-LABEL: @load_volatile
-; OPT: load volatile i16
-; OPT-NEXT: store
 define amdgpu_kernel void @load_volatile(i16 addrspace(1)* %out, i16 addrspace(4)* %in) {
+; OPT-LABEL: @load_volatile(
+; OPT-NEXT:    [[A:%.*]] = load volatile i16, i16 addrspace(4)* [[IN:%.*]], align 2
+; OPT-NEXT:    store i16 [[A]], i16 addrspace(1)* [[OUT:%.*]], align 2
+; OPT-NEXT:    ret void
+;
   %a = load volatile i16, i16 addrspace(4)* %in
   store i16 %a, i16 addrspace(1)* %out
   ret void
 }
 
-; OPT-LABEL: @constant_load_v2i8_volatile
-; OPT: load volatile <2 x i8>
-; OPT-NEXT: store
 define amdgpu_kernel void @constant_load_v2i8_volatile(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_v2i8_volatile(
+; OPT-NEXT:    [[LD:%.*]] = load volatile <2 x i8>, <2 x i8> addrspace(4)* [[IN:%.*]], align 2
+; OPT-NEXT:    store <2 x i8> [[LD]], <2 x i8> addrspace(1)* [[OUT:%.*]], align 2
+; OPT-NEXT:    ret void
+;
   %ld = load volatile <2 x i8>, <2 x i8> addrspace(4)* %in
   store <2 x i8> %ld, <2 x i8> addrspace(1)* %out
   ret void
 }
 
-; OPT-LABEL: @constant_load_v2i8_addrspace1
-; OPT: load <2 x i8>
-; OPT-NEXT: store
 define amdgpu_kernel void @constant_load_v2i8_addrspace1(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) #0 {
+; OPT-LABEL: @constant_load_v2i8_addrspace1(
+; OPT-NEXT:    [[LD:%.*]] = load <2 x i8>, <2 x i8> addrspace(1)* [[IN:%.*]], align 2
+; OPT-NEXT:    store <2 x i8> [[LD]], <2 x i8> addrspace(1)* [[OUT:%.*]], align 2
+; OPT-NEXT:    ret void
+;
   %ld = load <2 x i8>, <2 x i8> addrspace(1)* %in
   store <2 x i8> %ld, <2 x i8> addrspace(1)* %out
   ret void
 }
 
-; OPT-LABEL: @use_dispatch_ptr
-; OPT: bitcast
-; OPT-NEXT: load i32
-; OPT-NEXT: trunc
-; OPT-NEXT: zext
-; OPT-NEXT: store
 define amdgpu_kernel void @use_dispatch_ptr(i32 addrspace(1)* %ptr) #1 {
+; OPT-LABEL: @use_dispatch_ptr(
+; OPT-NEXT:    [[DISPATCH_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+; OPT-NEXT:    [[TMP1:%.*]] = bitcast i8 addrspace(4)* [[DISPATCH_PTR]] to i32 addrspace(4)*
+; OPT-NEXT:    [[TMP2:%.*]] = load i32, i32 addrspace(4)* [[TMP1]], align 4
+; OPT-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
+; OPT-NEXT:    [[LD:%.*]] = zext i8 [[TMP3]] to i32
+; OPT-NEXT:    store i32 [[LD]], i32 addrspace(1)* [[PTR:%.*]], align 4
+; OPT-NEXT:    ret void
+;
   %dispatch.ptr = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
   %val = load i8, i8 addrspace(4)* %dispatch.ptr, align 4
   %ld = zext i8 %val to i32
@@ -189,63 +226,105 @@ define amdgpu_kernel void @use_dispatch_ptr(i32 addrspace(1)* %ptr) #1 {
   ret void
 }
 
-; OPT-LABEL: @constant_load_i16_align4_range(
-; OPT: load i32, i32 addrspace(4)* %1, align 4, !range !0
 define amdgpu_kernel void @constant_load_i16_align4_range(i32 addrspace(1)* %out, i16 addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_i16_align4_range(
+; OPT-NEXT:    [[TMP1:%.*]] = bitcast i16 addrspace(4)* [[IN:%.*]] to i32 addrspace(4)*
+; OPT-NEXT:    [[TMP2:%.*]] = load i32, i32 addrspace(4)* [[TMP1]], align 4, !range [[RNG0:![0-9]+]]
+; OPT-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i16
+; OPT-NEXT:    [[EXT:%.*]] = sext i16 [[TMP3]] to i32
+; OPT-NEXT:    store i32 [[EXT]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; OPT-NEXT:    ret void
+;
   %ld = load i16, i16 addrspace(4)* %in, align 4, !range !0
   %ext = sext i16 %ld to i32
   store i32 %ext, i32 addrspace(1)* %out
   ret void
 }
 
-; OPT-LABEL: @constant_load_i16_align4_range_max(
-; OPT: load i32, i32 addrspace(4)* %1, align 4, !range !0
 define amdgpu_kernel void @constant_load_i16_align4_range_max(i32 addrspace(1)* %out, i16 addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_i16_align4_range_max(
+; OPT-NEXT:    [[TMP1:%.*]] = bitcast i16 addrspace(4)* [[IN:%.*]] to i32 addrspace(4)*
+; OPT-NEXT:    [[TMP2:%.*]] = load i32, i32 addrspace(4)* [[TMP1]], align 4, !range [[RNG0]]
+; OPT-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i16
+; OPT-NEXT:    [[EXT:%.*]] = sext i16 [[TMP3]] to i32
+; OPT-NEXT:    store i32 [[EXT]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; OPT-NEXT:    ret void
+;
   %ld = load i16, i16 addrspace(4)* %in, align 4, !range !1
   %ext = sext i16 %ld to i32
   store i32 %ext, i32 addrspace(1)* %out
   ret void
 }
 
-; OPT-LABEL: @constant_load_i16_align4_complex_range(
-; OPT: load i32, i32 addrspace(4)* %1, align 4, !range !1
 define amdgpu_kernel void @constant_load_i16_align4_complex_range(i32 addrspace(1)* %out, i16 addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_i16_align4_complex_range(
+; OPT-NEXT:    [[TMP1:%.*]] = bitcast i16 addrspace(4)* [[IN:%.*]] to i32 addrspace(4)*
+; OPT-NEXT:    [[TMP2:%.*]] = load i32, i32 addrspace(4)* [[TMP1]], align 4, !range [[RNG1:![0-9]+]]
+; OPT-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i16
+; OPT-NEXT:    [[EXT:%.*]] = sext i16 [[TMP3]] to i32
+; OPT-NEXT:    store i32 [[EXT]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; OPT-NEXT:    ret void
+;
   %ld = load i16, i16 addrspace(4)* %in, align 4, !range !2
   %ext = sext i16 %ld to i32
   store i32 %ext, i32 addrspace(1)* %out
   ret void
 }
 
-; OPT-LABEL: @constant_load_i16_align4_range_from_0(
-; OPT: load i32, i32 addrspace(4)* %1, align 4{{$}}
 define amdgpu_kernel void @constant_load_i16_align4_range_from_0(i32 addrspace(1)* %out, i16 addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_i16_align4_range_from_0(
+; OPT-NEXT:    [[TMP1:%.*]] = bitcast i16 addrspace(4)* [[IN:%.*]] to i32 addrspace(4)*
+; OPT-NEXT:    [[TMP2:%.*]] = load i32, i32 addrspace(4)* [[TMP1]], align 4
+; OPT-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i16
+; OPT-NEXT:    [[EXT:%.*]] = sext i16 [[TMP3]] to i32
+; OPT-NEXT:    store i32 [[EXT]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; OPT-NEXT:    ret void
+;
   %ld = load i16, i16 addrspace(4)* %in, align 4, !range !3
   %ext = sext i16 %ld to i32
   store i32 %ext, i32 addrspace(1)* %out
   ret void
 }
 
-; OPT-LABEL: @constant_load_i16_align4_range_from_neg(
-; OPT: load i32, i32 addrspace(4)* %1, align 4, !range !2
 define amdgpu_kernel void @constant_load_i16_align4_range_from_neg(i32 addrspace(1)* %out, i16 addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_i16_align4_range_from_neg(
+; OPT-NEXT:    [[TMP1:%.*]] = bitcast i16 addrspace(4)* [[IN:%.*]] to i32 addrspace(4)*
+; OPT-NEXT:    [[TMP2:%.*]] = load i32, i32 addrspace(4)* [[TMP1]], align 4, !range [[RNG2:![0-9]+]]
+; OPT-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i16
+; OPT-NEXT:    [[EXT:%.*]] = sext i16 [[TMP3]] to i32
+; OPT-NEXT:    store i32 [[EXT]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; OPT-NEXT:    ret void
+;
   %ld = load i16, i16 addrspace(4)* %in, align 4, !range !4
   %ext = sext i16 %ld to i32
   store i32 %ext, i32 addrspace(1)* %out
   ret void
 }
 
-; OPT-LABEL: @constant_load_i16_align4_range_from_neg_to_0(
-; OPT: load i32, i32 addrspace(4)* %1, align 4, !range !2
 define amdgpu_kernel void @constant_load_i16_align4_range_from_neg_to_0(i32 addrspace(1)* %out, i16 addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_i16_align4_range_from_neg_to_0(
+; OPT-NEXT:    [[TMP1:%.*]] = bitcast i16 addrspace(4)* [[IN:%.*]] to i32 addrspace(4)*
+; OPT-NEXT:    [[TMP2:%.*]] = load i32, i32 addrspace(4)* [[TMP1]], align 4, !range [[RNG2]]
+; OPT-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i16
+; OPT-NEXT:    [[EXT:%.*]] = sext i16 [[TMP3]] to i32
+; OPT-NEXT:    store i32 [[EXT]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; OPT-NEXT:    ret void
+;
   %ld = load i16, i16 addrspace(4)* %in, align 4, !range !5
   %ext = sext i16 %ld to i32
   store i32 %ext, i32 addrspace(1)* %out
   ret void
 }
 
-; OPT-LABEL: @constant_load_i16_align4_invariant
-; OPT: load i32, i32 addrspace(4)* %1, align 4, !invariant.load !3
 define amdgpu_kernel void @constant_load_i16_align4_invariant(i32 addrspace(1)* %out, i16 addrspace(4)* %in) #0 {
+; OPT-LABEL: @constant_load_i16_align4_invariant(
+; OPT-NEXT:    [[TMP1:%.*]] = bitcast i16 addrspace(4)* [[IN:%.*]] to i32 addrspace(4)*
+; OPT-NEXT:    [[TMP2:%.*]] = load i32, i32 addrspace(4)* [[TMP1]], align 4, !invariant.load !3
+; OPT-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i16
+; OPT-NEXT:    [[EXT:%.*]] = sext i16 [[TMP3]] to i32
+; OPT-NEXT:    store i32 [[EXT]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; OPT-NEXT:    ret void
+;
   %ld = load i16, i16 addrspace(4)* %in, align 4, !invariant.load !6
   %ext = sext i16 %ld to i32
   store i32 %ext, i32 addrspace(1)* %out


        

