[llvm] c75d36e - [Hexagon] Convert some tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 4 02:57:51 PDT 2023


Author: Nikita Popov
Date: 2023-04-04T11:57:42+02:00
New Revision: c75d36e8271558492d78d22e5659fbb542ca103e

URL: https://github.com/llvm/llvm-project/commit/c75d36e8271558492d78d22e5659fbb542ca103e
DIFF: https://github.com/llvm/llvm-project/commit/c75d36e8271558492d78d22e5659fbb542ca103e.diff

LOG: [Hexagon] Convert some tests to opaque pointers (NFC)

Added: 
    

Modified: 
    llvm/test/CodeGen/Hexagon/autohvx/vector-align-tbaa.ll
    llvm/test/CodeGen/Hexagon/common-gep-inbounds.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-tbaa.ll b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-tbaa.ll
index 0cef65d381870..51aaa80b143a1 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/vector-align-tbaa.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/vector-align-tbaa.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -opaque-pointers=0 -mtriple=hexagon -S -hexagon-vc -instcombine < %s | FileCheck %s
+; RUN: opt -mtriple=hexagon -S -hexagon-vc -instcombine < %s | FileCheck %s
 
 ; Check that Hexagon Vector Combine propagates (TBAA) metadata to the
 ; generated output. (Use instcombine to clean the output up a bit.)
@@ -9,280 +9,250 @@ target triple = "hexagon"
 
 ; Two unaligned loads, both with the same TBAA tag.
 ;
-define <64 x i16> @f0(i16* %a0, i32 %a1) #0 {
+define <64 x i16> @f0(ptr %a0, i32 %a1) #0 {
 ; CHECK-LABEL: @f0(
 ; CHECK-NEXT:  b0:
 ; CHECK-NEXT:    [[V0:%.*]] = add i32 [[A1:%.*]], 64
-; CHECK-NEXT:    [[V1:%.*]] = getelementptr i16, i16* [[A0:%.*]], i32 [[V0]]
-; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i16* [[V1]] to i32
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], -128
-; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i32 [[TMP1]] to <64 x i16>*
-; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint i16* [[V1]] to i32
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP1]] to <32 x i32>*
-; CHECK-NEXT:    [[TMP5:%.*]] = load <32 x i32>, <32 x i32>* [[TMP4]], align 128, !tbaa [[TBAA0:![0-9]+]]
-; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr <64 x i16>, <64 x i16>* [[TMP2]], i32 1
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <64 x i16>* [[TMP6]] to <128 x i8>*
-; CHECK-NEXT:    [[TMP8:%.*]] = load <128 x i8>, <128 x i8>* [[TMP7]], align 128, !tbaa [[TBAA0]]
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr <64 x i16>, <64 x i16>* [[TMP2]], i32 2
-; CHECK-NEXT:    [[TMP10:%.*]] = bitcast <64 x i16>* [[TMP9]] to <32 x i32>*
-; CHECK-NEXT:    [[TMP11:%.*]] = load <32 x i32>, <32 x i32>* [[TMP10]], align 128, !tbaa [[TBAA0]]
-; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <128 x i8> [[TMP8]] to <32 x i32>
-; CHECK-NEXT:    [[TMP13:%.*]] = call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> [[TMP12]], <32 x i32> [[TMP5]], i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <32 x i32> [[TMP13]] to <64 x i16>
-; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <128 x i8> [[TMP8]] to <32 x i32>
-; CHECK-NEXT:    [[TMP16:%.*]] = call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> [[TMP11]], <32 x i32> [[TMP15]], i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP17:%.*]] = bitcast <32 x i32> [[TMP16]] to <64 x i16>
-; CHECK-NEXT:    [[V8:%.*]] = add <64 x i16> [[TMP14]], [[TMP17]]
+; CHECK-NEXT:    [[V1:%.*]] = getelementptr i16, ptr [[A0:%.*]], i32 [[V0]]
+; CHECK-NEXT:    [[PTI:%.*]] = ptrtoint ptr [[V1]] to i32
+; CHECK-NEXT:    [[ADD:%.*]] = and i32 [[PTI]], -128
+; CHECK-NEXT:    [[ITP:%.*]] = inttoptr i32 [[ADD]] to ptr
+; CHECK-NEXT:    [[PTI1:%.*]] = ptrtoint ptr [[V1]] to i32
+; CHECK-NEXT:    [[ALD13:%.*]] = load <32 x i32>, ptr [[ITP]], align 128, !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr [[ITP]], i32 128
+; CHECK-NEXT:    [[ALD2:%.*]] = load <128 x i8>, ptr [[GEP]], align 128, !tbaa [[TBAA0]]
+; CHECK-NEXT:    [[GEP3:%.*]] = getelementptr i8, ptr [[ITP]], i32 256
+; CHECK-NEXT:    [[ALD414:%.*]] = load <32 x i32>, ptr [[GEP3]], align 128, !tbaa [[TBAA0]]
+; CHECK-NEXT:    [[CST:%.*]] = bitcast <128 x i8> [[ALD2]] to <32 x i32>
+; CHECK-NEXT:    [[CUP:%.*]] = call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> [[CST]], <32 x i32> [[ALD13]], i32 [[PTI1]])
+; CHECK-NEXT:    [[CST11:%.*]] = bitcast <32 x i32> [[CUP]] to <64 x i16>
+; CHECK-NEXT:    [[CST8:%.*]] = bitcast <128 x i8> [[ALD2]] to <32 x i32>
+; CHECK-NEXT:    [[CUP9:%.*]] = call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> [[ALD414]], <32 x i32> [[CST8]], i32 [[PTI1]])
+; CHECK-NEXT:    [[CST12:%.*]] = bitcast <32 x i32> [[CUP9]] to <64 x i16>
+; CHECK-NEXT:    [[V8:%.*]] = add <64 x i16> [[CST11]], [[CST12]]
 ; CHECK-NEXT:    ret <64 x i16> [[V8]]
 ;
 b0:
   %v0 = add i32 %a1, 64
-  %v1 = getelementptr i16, i16* %a0, i32 %v0
-  %v2 = bitcast i16* %v1 to <64 x i16>*
-  %v3 = load <64 x i16>, <64 x i16>* %v2, align 2, !tbaa !0
+  %v1 = getelementptr i16, ptr %a0, i32 %v0
+  %v3 = load <64 x i16>, ptr %v1, align 2, !tbaa !0
   %v4 = add i32 %a1, 128
-  %v5 = getelementptr i16, i16* %a0, i32 %v4
-  %v6 = bitcast i16* %v5 to <64 x i16>*
-  %v7 = load <64 x i16>, <64 x i16>* %v6, align 2, !tbaa !0
+  %v5 = getelementptr i16, ptr %a0, i32 %v4
+  %v7 = load <64 x i16>, ptr %v5, align 2, !tbaa !0
   %v8 = add <64 x i16> %v3, %v7
   ret <64 x i16> %v8
 }
 
 ; Two unaligned loads, only one with a TBAA tag.
 ;
-define <64 x i16> @f1(i16* %a0, i32 %a1) #0 {
+define <64 x i16> @f1(ptr %a0, i32 %a1) #0 {
 ; CHECK-LABEL: @f1(
 ; CHECK-NEXT:  b0:
 ; CHECK-NEXT:    [[V0:%.*]] = add i32 [[A1:%.*]], 64
-; CHECK-NEXT:    [[V1:%.*]] = getelementptr i16, i16* [[A0:%.*]], i32 [[V0]]
-; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i16* [[V1]] to i32
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], -128
-; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i32 [[TMP1]] to <64 x i16>*
-; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint i16* [[V1]] to i32
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP1]] to <32 x i32>*
-; CHECK-NEXT:    [[TMP5:%.*]] = load <32 x i32>, <32 x i32>* [[TMP4]], align 128, !tbaa [[TBAA0]]
-; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr <64 x i16>, <64 x i16>* [[TMP2]], i32 1
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <64 x i16>* [[TMP6]] to <128 x i8>*
-; CHECK-NEXT:    [[TMP8:%.*]] = load <128 x i8>, <128 x i8>* [[TMP7]], align 128
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr <64 x i16>, <64 x i16>* [[TMP2]], i32 2
-; CHECK-NEXT:    [[TMP10:%.*]] = bitcast <64 x i16>* [[TMP9]] to <32 x i32>*
-; CHECK-NEXT:    [[TMP11:%.*]] = load <32 x i32>, <32 x i32>* [[TMP10]], align 128
-; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <128 x i8> [[TMP8]] to <32 x i32>
-; CHECK-NEXT:    [[TMP13:%.*]] = call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> [[TMP12]], <32 x i32> [[TMP5]], i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <32 x i32> [[TMP13]] to <64 x i16>
-; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <128 x i8> [[TMP8]] to <32 x i32>
-; CHECK-NEXT:    [[TMP16:%.*]] = call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> [[TMP11]], <32 x i32> [[TMP15]], i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP17:%.*]] = bitcast <32 x i32> [[TMP16]] to <64 x i16>
-; CHECK-NEXT:    [[V8:%.*]] = add <64 x i16> [[TMP14]], [[TMP17]]
+; CHECK-NEXT:    [[V1:%.*]] = getelementptr i16, ptr [[A0:%.*]], i32 [[V0]]
+; CHECK-NEXT:    [[PTI:%.*]] = ptrtoint ptr [[V1]] to i32
+; CHECK-NEXT:    [[ADD:%.*]] = and i32 [[PTI]], -128
+; CHECK-NEXT:    [[ITP:%.*]] = inttoptr i32 [[ADD]] to ptr
+; CHECK-NEXT:    [[PTI1:%.*]] = ptrtoint ptr [[V1]] to i32
+; CHECK-NEXT:    [[ALD13:%.*]] = load <32 x i32>, ptr [[ITP]], align 128, !tbaa [[TBAA0]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr [[ITP]], i32 128
+; CHECK-NEXT:    [[ALD2:%.*]] = load <128 x i8>, ptr [[GEP]], align 128
+; CHECK-NEXT:    [[GEP3:%.*]] = getelementptr i8, ptr [[ITP]], i32 256
+; CHECK-NEXT:    [[ALD414:%.*]] = load <32 x i32>, ptr [[GEP3]], align 128
+; CHECK-NEXT:    [[CST:%.*]] = bitcast <128 x i8> [[ALD2]] to <32 x i32>
+; CHECK-NEXT:    [[CUP:%.*]] = call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> [[CST]], <32 x i32> [[ALD13]], i32 [[PTI1]])
+; CHECK-NEXT:    [[CST11:%.*]] = bitcast <32 x i32> [[CUP]] to <64 x i16>
+; CHECK-NEXT:    [[CST8:%.*]] = bitcast <128 x i8> [[ALD2]] to <32 x i32>
+; CHECK-NEXT:    [[CUP9:%.*]] = call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> [[ALD414]], <32 x i32> [[CST8]], i32 [[PTI1]])
+; CHECK-NEXT:    [[CST12:%.*]] = bitcast <32 x i32> [[CUP9]] to <64 x i16>
+; CHECK-NEXT:    [[V8:%.*]] = add <64 x i16> [[CST11]], [[CST12]]
 ; CHECK-NEXT:    ret <64 x i16> [[V8]]
 ;
 b0:
   %v0 = add i32 %a1, 64
-  %v1 = getelementptr i16, i16* %a0, i32 %v0
-  %v2 = bitcast i16* %v1 to <64 x i16>*
-  %v3 = load <64 x i16>, <64 x i16>* %v2, align 2, !tbaa !0
+  %v1 = getelementptr i16, ptr %a0, i32 %v0
+  %v3 = load <64 x i16>, ptr %v1, align 2, !tbaa !0
   %v4 = add i32 %a1, 128
-  %v5 = getelementptr i16, i16* %a0, i32 %v4
-  %v6 = bitcast i16* %v5 to <64 x i16>*
-  %v7 = load <64 x i16>, <64 x i16>* %v6, align 2
+  %v5 = getelementptr i16, ptr %a0, i32 %v4
+  %v7 = load <64 x i16>, ptr %v5, align 2
   %v8 = add <64 x i16> %v3, %v7
   ret <64 x i16> %v8
 }
 
 ; Two unaligned loads, with different TBAA tags.
 ;
-define <64 x i16> @f2(i16* %a0, i32 %a1) #0 {
+define <64 x i16> @f2(ptr %a0, i32 %a1) #0 {
 ; CHECK-LABEL: @f2(
 ; CHECK-NEXT:  b0:
 ; CHECK-NEXT:    [[V0:%.*]] = add i32 [[A1:%.*]], 64
-; CHECK-NEXT:    [[V1:%.*]] = getelementptr i16, i16* [[A0:%.*]], i32 [[V0]]
-; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i16* [[V1]] to i32
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], -128
-; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i32 [[TMP1]] to <64 x i16>*
-; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint i16* [[V1]] to i32
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP1]] to <32 x i32>*
-; CHECK-NEXT:    [[TMP5:%.*]] = load <32 x i32>, <32 x i32>* [[TMP4]], align 128, !tbaa [[TBAA0]]
-; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr <64 x i16>, <64 x i16>* [[TMP2]], i32 1
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <64 x i16>* [[TMP6]] to <128 x i8>*
-; CHECK-NEXT:    [[TMP8:%.*]] = load <128 x i8>, <128 x i8>* [[TMP7]], align 128
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr <64 x i16>, <64 x i16>* [[TMP2]], i32 2
-; CHECK-NEXT:    [[TMP10:%.*]] = bitcast <64 x i16>* [[TMP9]] to <32 x i32>*
-; CHECK-NEXT:    [[TMP11:%.*]] = load <32 x i32>, <32 x i32>* [[TMP10]], align 128, !tbaa [[TBAA3:![0-9]+]]
-; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <128 x i8> [[TMP8]] to <32 x i32>
-; CHECK-NEXT:    [[TMP13:%.*]] = call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> [[TMP12]], <32 x i32> [[TMP5]], i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <32 x i32> [[TMP13]] to <64 x i16>
-; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <128 x i8> [[TMP8]] to <32 x i32>
-; CHECK-NEXT:    [[TMP16:%.*]] = call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> [[TMP11]], <32 x i32> [[TMP15]], i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP17:%.*]] = bitcast <32 x i32> [[TMP16]] to <64 x i16>
-; CHECK-NEXT:    [[V8:%.*]] = add <64 x i16> [[TMP14]], [[TMP17]]
+; CHECK-NEXT:    [[V1:%.*]] = getelementptr i16, ptr [[A0:%.*]], i32 [[V0]]
+; CHECK-NEXT:    [[PTI:%.*]] = ptrtoint ptr [[V1]] to i32
+; CHECK-NEXT:    [[ADD:%.*]] = and i32 [[PTI]], -128
+; CHECK-NEXT:    [[ITP:%.*]] = inttoptr i32 [[ADD]] to ptr
+; CHECK-NEXT:    [[PTI1:%.*]] = ptrtoint ptr [[V1]] to i32
+; CHECK-NEXT:    [[ALD13:%.*]] = load <32 x i32>, ptr [[ITP]], align 128, !tbaa [[TBAA0]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr [[ITP]], i32 128
+; CHECK-NEXT:    [[ALD2:%.*]] = load <128 x i8>, ptr [[GEP]], align 128
+; CHECK-NEXT:    [[GEP3:%.*]] = getelementptr i8, ptr [[ITP]], i32 256
+; CHECK-NEXT:    [[ALD414:%.*]] = load <32 x i32>, ptr [[GEP3]], align 128, !tbaa [[TBAA3:![0-9]+]]
+; CHECK-NEXT:    [[CST:%.*]] = bitcast <128 x i8> [[ALD2]] to <32 x i32>
+; CHECK-NEXT:    [[CUP:%.*]] = call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> [[CST]], <32 x i32> [[ALD13]], i32 [[PTI1]])
+; CHECK-NEXT:    [[CST11:%.*]] = bitcast <32 x i32> [[CUP]] to <64 x i16>
+; CHECK-NEXT:    [[CST8:%.*]] = bitcast <128 x i8> [[ALD2]] to <32 x i32>
+; CHECK-NEXT:    [[CUP9:%.*]] = call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> [[ALD414]], <32 x i32> [[CST8]], i32 [[PTI1]])
+; CHECK-NEXT:    [[CST12:%.*]] = bitcast <32 x i32> [[CUP9]] to <64 x i16>
+; CHECK-NEXT:    [[V8:%.*]] = add <64 x i16> [[CST11]], [[CST12]]
 ; CHECK-NEXT:    ret <64 x i16> [[V8]]
 ;
 b0:
   %v0 = add i32 %a1, 64
-  %v1 = getelementptr i16, i16* %a0, i32 %v0
-  %v2 = bitcast i16* %v1 to <64 x i16>*
-  %v3 = load <64 x i16>, <64 x i16>* %v2, align 2, !tbaa !0
+  %v1 = getelementptr i16, ptr %a0, i32 %v0
+  %v3 = load <64 x i16>, ptr %v1, align 2, !tbaa !0
   %v4 = add i32 %a1, 128
-  %v5 = getelementptr i16, i16* %a0, i32 %v4
-  %v6 = bitcast i16* %v5 to <64 x i16>*
-  %v7 = load <64 x i16>, <64 x i16>* %v6, align 2, !tbaa !3
+  %v5 = getelementptr i16, ptr %a0, i32 %v4
+  %v7 = load <64 x i16>, ptr %v5, align 2, !tbaa !3
   %v8 = add <64 x i16> %v3, %v7
   ret <64 x i16> %v8
 }
 
 ; Two unaligned stores, both with the same TBAA tag.
 ;
-define void @f3(i16* %a0, i32 %a1, <64 x i16> %a2, <64 x i16> %a3) #0 {
+define void @f3(ptr %a0, i32 %a1, <64 x i16> %a2, <64 x i16> %a3) #0 {
 ; CHECK-LABEL: @f3(
 ; CHECK-NEXT:  b0:
 ; CHECK-NEXT:    [[V0:%.*]] = add i32 [[A1:%.*]], 64
-; CHECK-NEXT:    [[V1:%.*]] = getelementptr i16, i16* [[A0:%.*]], i32 [[V0]]
-; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i16* [[V1]] to i32
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], -128
-; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i32 [[TMP1]] to <64 x i16>*
-; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint i16* [[V1]] to i32
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <64 x i16> [[A2:%.*]] to <32 x i32>
-; CHECK-NEXT:    [[TMP5:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> [[TMP4]], <32 x i32> undef, i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <32 x i32> [[TMP5]] to <128 x i8>
-; CHECK-NEXT:    [[TMP7:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <32 x i32> zeroinitializer, i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <32 x i32> [[TMP7]] to <128 x i8>
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <64 x i16> [[A3:%.*]] to <32 x i32>
-; CHECK-NEXT:    [[TMP10:%.*]] = bitcast <64 x i16> [[A2]] to <32 x i32>
-; CHECK-NEXT:    [[TMP11:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> [[TMP9]], <32 x i32> [[TMP10]], i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <32 x i32> [[TMP11]] to <128 x i8>
-; CHECK-NEXT:    [[TMP13:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <32 x i32> [[TMP13]] to <128 x i8>
-; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <64 x i16> [[A3]] to <32 x i32>
-; CHECK-NEXT:    [[TMP16:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> undef, <32 x i32> [[TMP15]], i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP17:%.*]] = bitcast <32 x i32> [[TMP16]] to <128 x i8>
-; CHECK-NEXT:    [[TMP18:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> zeroinitializer, <32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP19:%.*]] = bitcast <32 x i32> [[TMP18]] to <128 x i8>
-; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i32 [[TMP1]] to <128 x i8>*
-; CHECK-NEXT:    [[TMP21:%.*]] = trunc <128 x i8> [[TMP8]] to <128 x i1>
-; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> [[TMP6]], <128 x i8>* [[TMP20]], i32 128, <128 x i1> [[TMP21]]), !tbaa [[TBAA5:![0-9]+]]
-; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr <64 x i16>, <64 x i16>* [[TMP2]], i32 1
-; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <64 x i16>* [[TMP22]] to <128 x i8>*
-; CHECK-NEXT:    [[TMP24:%.*]] = trunc <128 x i8> [[TMP14]] to <128 x i1>
-; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> [[TMP12]], <128 x i8>* [[TMP23]], i32 128, <128 x i1> [[TMP24]]), !tbaa [[TBAA5]]
-; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr <64 x i16>, <64 x i16>* [[TMP2]], i32 2
-; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <64 x i16>* [[TMP25]] to <128 x i8>*
-; CHECK-NEXT:    [[TMP27:%.*]] = trunc <128 x i8> [[TMP19]] to <128 x i1>
-; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> [[TMP17]], <128 x i8>* [[TMP26]], i32 128, <128 x i1> [[TMP27]]), !tbaa [[TBAA5]]
+; CHECK-NEXT:    [[V1:%.*]] = getelementptr i16, ptr [[A0:%.*]], i32 [[V0]]
+; CHECK-NEXT:    [[PTI:%.*]] = ptrtoint ptr [[V1]] to i32
+; CHECK-NEXT:    [[ADD:%.*]] = and i32 [[PTI]], -128
+; CHECK-NEXT:    [[ITP:%.*]] = inttoptr i32 [[ADD]] to ptr
+; CHECK-NEXT:    [[PTI1:%.*]] = ptrtoint ptr [[V1]] to i32
+; CHECK-NEXT:    [[CST3:%.*]] = bitcast <64 x i16> [[A2:%.*]] to <32 x i32>
+; CHECK-NEXT:    [[CUP:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> [[CST3]], <32 x i32> undef, i32 [[PTI1]])
+; CHECK-NEXT:    [[CST4:%.*]] = bitcast <32 x i32> [[CUP]] to <128 x i8>
+; CHECK-NEXT:    [[CUP5:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <32 x i32> zeroinitializer, i32 [[PTI1]])
+; CHECK-NEXT:    [[CST6:%.*]] = bitcast <32 x i32> [[CUP5]] to <128 x i8>
+; CHECK-NEXT:    [[CST7:%.*]] = bitcast <64 x i16> [[A3:%.*]] to <32 x i32>
+; CHECK-NEXT:    [[CST8:%.*]] = bitcast <64 x i16> [[A2]] to <32 x i32>
+; CHECK-NEXT:    [[CUP9:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> [[CST7]], <32 x i32> [[CST8]], i32 [[PTI1]])
+; CHECK-NEXT:    [[CST10:%.*]] = bitcast <32 x i32> [[CUP9]] to <128 x i8>
+; CHECK-NEXT:    [[CUP11:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, i32 [[PTI1]])
+; CHECK-NEXT:    [[CST12:%.*]] = bitcast <32 x i32> [[CUP11]] to <128 x i8>
+; CHECK-NEXT:    [[CST13:%.*]] = bitcast <64 x i16> [[A3]] to <32 x i32>
+; CHECK-NEXT:    [[CUP14:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> undef, <32 x i32> [[CST13]], i32 [[PTI1]])
+; CHECK-NEXT:    [[CST15:%.*]] = bitcast <32 x i32> [[CUP14]] to <128 x i8>
+; CHECK-NEXT:    [[CUP16:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> zeroinitializer, <32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, i32 [[PTI1]])
+; CHECK-NEXT:    [[CST17:%.*]] = bitcast <32 x i32> [[CUP16]] to <128 x i8>
+; CHECK-NEXT:    [[TRN:%.*]] = trunc <128 x i8> [[CST6]] to <128 x i1>
+; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0(<128 x i8> [[CST4]], ptr [[ITP]], i32 128, <128 x i1> [[TRN]]), !tbaa [[TBAA5:![0-9]+]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr [[ITP]], i32 128
+; CHECK-NEXT:    [[TRN18:%.*]] = trunc <128 x i8> [[CST12]] to <128 x i1>
+; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0(<128 x i8> [[CST10]], ptr [[GEP]], i32 128, <128 x i1> [[TRN18]]), !tbaa [[TBAA5]]
+; CHECK-NEXT:    [[GEP19:%.*]] = getelementptr i8, ptr [[ITP]], i32 256
+; CHECK-NEXT:    [[TRN20:%.*]] = trunc <128 x i8> [[CST17]] to <128 x i1>
+; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0(<128 x i8> [[CST15]], ptr [[GEP19]], i32 128, <128 x i1> [[TRN20]]), !tbaa [[TBAA5]]
 ; CHECK-NEXT:    ret void
 ;
 b0:
   %v0 = add i32 %a1, 64
-  %v1 = getelementptr i16, i16* %a0, i32 %v0
-  %v2 = bitcast i16* %v1 to <64 x i16>*
-  store <64 x i16> %a2, <64 x i16>* %v2, align 2, !tbaa !5
+  %v1 = getelementptr i16, ptr %a0, i32 %v0
+  store <64 x i16> %a2, ptr %v1, align 2, !tbaa !5
   %v3 = add i32 %a1, 128
-  %v4 = getelementptr i16, i16* %a0, i32 %v3
-  %v5 = bitcast i16* %v4 to <64 x i16>*
-  store <64 x i16> %a3, <64 x i16>* %v5, align 2, !tbaa !5
+  %v4 = getelementptr i16, ptr %a0, i32 %v3
+  store <64 x i16> %a3, ptr %v4, align 2, !tbaa !5
   ret void
 }
 
 ; Two unaligned stores, only one with a TBAA tag.
 ;
-define void @f4(i16* %a0, i32 %a1, <64 x i16> %a2, <64 x i16> %a3) #0 {
+define void @f4(ptr %a0, i32 %a1, <64 x i16> %a2, <64 x i16> %a3) #0 {
 ; CHECK-LABEL: @f4(
 ; CHECK-NEXT:  b0:
 ; CHECK-NEXT:    [[V0:%.*]] = add i32 [[A1:%.*]], 64
-; CHECK-NEXT:    [[V1:%.*]] = getelementptr i16, i16* [[A0:%.*]], i32 [[V0]]
-; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i16* [[V1]] to i32
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], -128
-; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i32 [[TMP1]] to <64 x i16>*
-; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint i16* [[V1]] to i32
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <64 x i16> [[A2:%.*]] to <32 x i32>
-; CHECK-NEXT:    [[TMP5:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> [[TMP4]], <32 x i32> undef, i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <32 x i32> [[TMP5]] to <128 x i8>
-; CHECK-NEXT:    [[TMP7:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <32 x i32> zeroinitializer, i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <32 x i32> [[TMP7]] to <128 x i8>
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <64 x i16> [[A3:%.*]] to <32 x i32>
-; CHECK-NEXT:    [[TMP10:%.*]] = bitcast <64 x i16> [[A2]] to <32 x i32>
-; CHECK-NEXT:    [[TMP11:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> [[TMP9]], <32 x i32> [[TMP10]], i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <32 x i32> [[TMP11]] to <128 x i8>
-; CHECK-NEXT:    [[TMP13:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <32 x i32> [[TMP13]] to <128 x i8>
-; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <64 x i16> [[A3]] to <32 x i32>
-; CHECK-NEXT:    [[TMP16:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> undef, <32 x i32> [[TMP15]], i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP17:%.*]] = bitcast <32 x i32> [[TMP16]] to <128 x i8>
-; CHECK-NEXT:    [[TMP18:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> zeroinitializer, <32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP19:%.*]] = bitcast <32 x i32> [[TMP18]] to <128 x i8>
-; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i32 [[TMP1]] to <128 x i8>*
-; CHECK-NEXT:    [[TMP21:%.*]] = trunc <128 x i8> [[TMP8]] to <128 x i1>
-; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> [[TMP6]], <128 x i8>* [[TMP20]], i32 128, <128 x i1> [[TMP21]])
-; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr <64 x i16>, <64 x i16>* [[TMP2]], i32 1
-; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <64 x i16>* [[TMP22]] to <128 x i8>*
-; CHECK-NEXT:    [[TMP24:%.*]] = trunc <128 x i8> [[TMP14]] to <128 x i1>
-; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> [[TMP12]], <128 x i8>* [[TMP23]], i32 128, <128 x i1> [[TMP24]])
-; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr <64 x i16>, <64 x i16>* [[TMP2]], i32 2
-; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <64 x i16>* [[TMP25]] to <128 x i8>*
-; CHECK-NEXT:    [[TMP27:%.*]] = trunc <128 x i8> [[TMP19]] to <128 x i1>
-; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> [[TMP17]], <128 x i8>* [[TMP26]], i32 128, <128 x i1> [[TMP27]]), !tbaa [[TBAA5]]
+; CHECK-NEXT:    [[V1:%.*]] = getelementptr i16, ptr [[A0:%.*]], i32 [[V0]]
+; CHECK-NEXT:    [[PTI:%.*]] = ptrtoint ptr [[V1]] to i32
+; CHECK-NEXT:    [[ADD:%.*]] = and i32 [[PTI]], -128
+; CHECK-NEXT:    [[ITP:%.*]] = inttoptr i32 [[ADD]] to ptr
+; CHECK-NEXT:    [[PTI1:%.*]] = ptrtoint ptr [[V1]] to i32
+; CHECK-NEXT:    [[CST3:%.*]] = bitcast <64 x i16> [[A2:%.*]] to <32 x i32>
+; CHECK-NEXT:    [[CUP:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> [[CST3]], <32 x i32> undef, i32 [[PTI1]])
+; CHECK-NEXT:    [[CST4:%.*]] = bitcast <32 x i32> [[CUP]] to <128 x i8>
+; CHECK-NEXT:    [[CUP5:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <32 x i32> zeroinitializer, i32 [[PTI1]])
+; CHECK-NEXT:    [[CST6:%.*]] = bitcast <32 x i32> [[CUP5]] to <128 x i8>
+; CHECK-NEXT:    [[CST7:%.*]] = bitcast <64 x i16> [[A3:%.*]] to <32 x i32>
+; CHECK-NEXT:    [[CST8:%.*]] = bitcast <64 x i16> [[A2]] to <32 x i32>
+; CHECK-NEXT:    [[CUP9:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> [[CST7]], <32 x i32> [[CST8]], i32 [[PTI1]])
+; CHECK-NEXT:    [[CST10:%.*]] = bitcast <32 x i32> [[CUP9]] to <128 x i8>
+; CHECK-NEXT:    [[CUP11:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, i32 [[PTI1]])
+; CHECK-NEXT:    [[CST12:%.*]] = bitcast <32 x i32> [[CUP11]] to <128 x i8>
+; CHECK-NEXT:    [[CST13:%.*]] = bitcast <64 x i16> [[A3]] to <32 x i32>
+; CHECK-NEXT:    [[CUP14:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> undef, <32 x i32> [[CST13]], i32 [[PTI1]])
+; CHECK-NEXT:    [[CST15:%.*]] = bitcast <32 x i32> [[CUP14]] to <128 x i8>
+; CHECK-NEXT:    [[CUP16:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> zeroinitializer, <32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, i32 [[PTI1]])
+; CHECK-NEXT:    [[CST17:%.*]] = bitcast <32 x i32> [[CUP16]] to <128 x i8>
+; CHECK-NEXT:    [[TRN:%.*]] = trunc <128 x i8> [[CST6]] to <128 x i1>
+; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0(<128 x i8> [[CST4]], ptr [[ITP]], i32 128, <128 x i1> [[TRN]])
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr [[ITP]], i32 128
+; CHECK-NEXT:    [[TRN18:%.*]] = trunc <128 x i8> [[CST12]] to <128 x i1>
+; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0(<128 x i8> [[CST10]], ptr [[GEP]], i32 128, <128 x i1> [[TRN18]])
+; CHECK-NEXT:    [[GEP19:%.*]] = getelementptr i8, ptr [[ITP]], i32 256
+; CHECK-NEXT:    [[TRN20:%.*]] = trunc <128 x i8> [[CST17]] to <128 x i1>
+; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0(<128 x i8> [[CST15]], ptr [[GEP19]], i32 128, <128 x i1> [[TRN20]]), !tbaa [[TBAA5]]
 ; CHECK-NEXT:    ret void
 ;
 b0:
   %v0 = add i32 %a1, 64
-  %v1 = getelementptr i16, i16* %a0, i32 %v0
-  %v2 = bitcast i16* %v1 to <64 x i16>*
-  store <64 x i16> %a2, <64 x i16>* %v2, align 2
+  %v1 = getelementptr i16, ptr %a0, i32 %v0
+  store <64 x i16> %a2, ptr %v1, align 2
   %v3 = add i32 %a1, 128
-  %v4 = getelementptr i16, i16* %a0, i32 %v3
-  %v5 = bitcast i16* %v4 to <64 x i16>*
-  store <64 x i16> %a3, <64 x i16>* %v5, align 2, !tbaa !5
+  %v4 = getelementptr i16, ptr %a0, i32 %v3
+  store <64 x i16> %a3, ptr %v4, align 2, !tbaa !5
   ret void
 }
 
 ; Two unaligned store, with different TBAA tags.
 ;
-define void @f5(i16* %a0, i32 %a1, <64 x i16> %a2, <64 x i16> %a3) #0 {
+define void @f5(ptr %a0, i32 %a1, <64 x i16> %a2, <64 x i16> %a3) #0 {
 ; CHECK-LABEL: @f5(
 ; CHECK-NEXT:  b0:
 ; CHECK-NEXT:    [[V0:%.*]] = add i32 [[A1:%.*]], 64
-; CHECK-NEXT:    [[V1:%.*]] = getelementptr i16, i16* [[A0:%.*]], i32 [[V0]]
-; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i16* [[V1]] to i32
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], -128
-; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i32 [[TMP1]] to <64 x i16>*
-; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint i16* [[V1]] to i32
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <64 x i16> [[A2:%.*]] to <32 x i32>
-; CHECK-NEXT:    [[TMP5:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> [[TMP4]], <32 x i32> undef, i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <32 x i32> [[TMP5]] to <128 x i8>
-; CHECK-NEXT:    [[TMP7:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <32 x i32> zeroinitializer, i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <32 x i32> [[TMP7]] to <128 x i8>
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <64 x i16> [[A3:%.*]] to <32 x i32>
-; CHECK-NEXT:    [[TMP10:%.*]] = bitcast <64 x i16> [[A2]] to <32 x i32>
-; CHECK-NEXT:    [[TMP11:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> [[TMP9]], <32 x i32> [[TMP10]], i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <32 x i32> [[TMP11]] to <128 x i8>
-; CHECK-NEXT:    [[TMP13:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <32 x i32> [[TMP13]] to <128 x i8>
-; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <64 x i16> [[A3]] to <32 x i32>
-; CHECK-NEXT:    [[TMP16:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> undef, <32 x i32> [[TMP15]], i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP17:%.*]] = bitcast <32 x i32> [[TMP16]] to <128 x i8>
-; CHECK-NEXT:    [[TMP18:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> zeroinitializer, <32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, i32 [[TMP3]])
-; CHECK-NEXT:    [[TMP19:%.*]] = bitcast <32 x i32> [[TMP18]] to <128 x i8>
-; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i32 [[TMP1]] to <128 x i8>*
-; CHECK-NEXT:    [[TMP21:%.*]] = trunc <128 x i8> [[TMP8]] to <128 x i1>
-; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> [[TMP6]], <128 x i8>* [[TMP20]], i32 128, <128 x i1> [[TMP21]]), !tbaa [[TBAA5]]
-; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr <64 x i16>, <64 x i16>* [[TMP2]], i32 1
-; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <64 x i16>* [[TMP22]] to <128 x i8>*
-; CHECK-NEXT:    [[TMP24:%.*]] = trunc <128 x i8> [[TMP14]] to <128 x i1>
-; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> [[TMP12]], <128 x i8>* [[TMP23]], i32 128, <128 x i1> [[TMP24]])
-; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr <64 x i16>, <64 x i16>* [[TMP2]], i32 2
-; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <64 x i16>* [[TMP25]] to <128 x i8>*
-; CHECK-NEXT:    [[TMP27:%.*]] = trunc <128 x i8> [[TMP19]] to <128 x i1>
-; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> [[TMP17]], <128 x i8>* [[TMP26]], i32 128, <128 x i1> [[TMP27]]), !tbaa [[TBAA7:![0-9]+]]
+; CHECK-NEXT:    [[V1:%.*]] = getelementptr i16, ptr [[A0:%.*]], i32 [[V0]]
+; CHECK-NEXT:    [[PTI:%.*]] = ptrtoint ptr [[V1]] to i32
+; CHECK-NEXT:    [[ADD:%.*]] = and i32 [[PTI]], -128
+; CHECK-NEXT:    [[ITP:%.*]] = inttoptr i32 [[ADD]] to ptr
+; CHECK-NEXT:    [[PTI1:%.*]] = ptrtoint ptr [[V1]] to i32
+; CHECK-NEXT:    [[CST3:%.*]] = bitcast <64 x i16> [[A2:%.*]] to <32 x i32>
+; CHECK-NEXT:    [[CUP:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> [[CST3]], <32 x i32> undef, i32 [[PTI1]])
+; CHECK-NEXT:    [[CST4:%.*]] = bitcast <32 x i32> [[CUP]] to <128 x i8>
+; CHECK-NEXT:    [[CUP5:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <32 x i32> zeroinitializer, i32 [[PTI1]])
+; CHECK-NEXT:    [[CST6:%.*]] = bitcast <32 x i32> [[CUP5]] to <128 x i8>
+; CHECK-NEXT:    [[CST7:%.*]] = bitcast <64 x i16> [[A3:%.*]] to <32 x i32>
+; CHECK-NEXT:    [[CST8:%.*]] = bitcast <64 x i16> [[A2]] to <32 x i32>
+; CHECK-NEXT:    [[CUP9:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> [[CST7]], <32 x i32> [[CST8]], i32 [[PTI1]])
+; CHECK-NEXT:    [[CST10:%.*]] = bitcast <32 x i32> [[CUP9]] to <128 x i8>
+; CHECK-NEXT:    [[CUP11:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, i32 [[PTI1]])
+; CHECK-NEXT:    [[CST12:%.*]] = bitcast <32 x i32> [[CUP11]] to <128 x i8>
+; CHECK-NEXT:    [[CST13:%.*]] = bitcast <64 x i16> [[A3]] to <32 x i32>
+; CHECK-NEXT:    [[CUP14:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> undef, <32 x i32> [[CST13]], i32 [[PTI1]])
+; CHECK-NEXT:    [[CST15:%.*]] = bitcast <32 x i32> [[CUP14]] to <128 x i8>
+; CHECK-NEXT:    [[CUP16:%.*]] = call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> zeroinitializer, <32 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, i32 [[PTI1]])
+; CHECK-NEXT:    [[CST17:%.*]] = bitcast <32 x i32> [[CUP16]] to <128 x i8>
+; CHECK-NEXT:    [[TRN:%.*]] = trunc <128 x i8> [[CST6]] to <128 x i1>
+; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0(<128 x i8> [[CST4]], ptr [[ITP]], i32 128, <128 x i1> [[TRN]]), !tbaa [[TBAA5]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr [[ITP]], i32 128
+; CHECK-NEXT:    [[TRN18:%.*]] = trunc <128 x i8> [[CST12]] to <128 x i1>
+; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0(<128 x i8> [[CST10]], ptr [[GEP]], i32 128, <128 x i1> [[TRN18]])
+; CHECK-NEXT:    [[GEP19:%.*]] = getelementptr i8, ptr [[ITP]], i32 256
+; CHECK-NEXT:    [[TRN20:%.*]] = trunc <128 x i8> [[CST17]] to <128 x i1>
+; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0(<128 x i8> [[CST15]], ptr [[GEP19]], i32 128, <128 x i1> [[TRN20]]), !tbaa [[TBAA7:![0-9]+]]
 ; CHECK-NEXT:    ret void
 ;
 b0:
   %v0 = add i32 %a1, 64
-  %v1 = getelementptr i16, i16* %a0, i32 %v0
-  %v2 = bitcast i16* %v1 to <64 x i16>*
-  store <64 x i16> %a2, <64 x i16>* %v2, align 2, !tbaa !5
+  %v1 = getelementptr i16, ptr %a0, i32 %v0
+  store <64 x i16> %a2, ptr %v1, align 2, !tbaa !5
   %v3 = add i32 %a1, 128
-  %v4 = getelementptr i16, i16* %a0, i32 %v3
-  %v5 = bitcast i16* %v4 to <64 x i16>*
-  store <64 x i16> %a3, <64 x i16>* %v5, align 2, !tbaa !7
+  %v4 = getelementptr i16, ptr %a0, i32 %v3
+  store <64 x i16> %a3, ptr %v4, align 2, !tbaa !7
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/common-gep-inbounds.ll b/llvm/test/CodeGen/Hexagon/common-gep-inbounds.ll
index 700e0e93d477e..bb85874439956 100644
--- a/llvm/test/CodeGen/Hexagon/common-gep-inbounds.ll
+++ b/llvm/test/CodeGen/Hexagon/common-gep-inbounds.ll
@@ -1,4 +1,4 @@
-; RUN: llc -opaque-pointers=0 -march=hexagon -debug-only=commgep 2>&1 < %s | FileCheck %s
+; RUN: llc -march=hexagon -debug-only=commgep 2>&1 < %s | FileCheck %s
 ; REQUIRES: asserts
 
 ; We should generate new GEPs with "inbounds" flag.
@@ -10,10 +10,10 @@ target triple = "hexagon"
 %struct.0 = type { i16, i16 }
 
 ; Function Attrs: nounwind
-define i16 @TraceBack(%struct.0* %t) #0 {
+define i16 @TraceBack(ptr %t) #0 {
 entry:
-  %p = getelementptr inbounds %struct.0, %struct.0* %t, i32 0, i32 0
-  %a = load i16, i16* %p
+  %p = getelementptr inbounds %struct.0, ptr %t, i32 0, i32 1
+  %a = load i16, ptr %p
   ret i16 %a
 }
 


        


More information about the llvm-commits mailing list