[llvm] b511537 - [LSR] Convert some tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 23 08:14:13 PDT 2023


Author: Nikita Popov
Date: 2023-06-23T17:13:57+02:00
New Revision: b51153792b1fdfe93d3a20a226466b44c8f23eac

URL: https://github.com/llvm/llvm-project/commit/b51153792b1fdfe93d3a20a226466b44c8f23eac
DIFF: https://github.com/llvm/llvm-project/commit/b51153792b1fdfe93d3a20a226466b44c8f23eac.diff

LOG: [LSR] Convert some tests to opaque pointers (NFC)

Added: 
    

Modified: 
    llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll
    llvm/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll
    llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll
    llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll
    llvm/test/Transforms/LoopStrengthReduce/ivchain.ll
    llvm/test/Transforms/LoopStrengthReduce/nonintegral.ll
    llvm/test/Transforms/LoopStrengthReduce/shl.ll
    llvm/test/Transforms/LoopStrengthReduce/uglygep-address-space.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll b/llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll
index 36088657e6006..92bc86b11b838 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -opaque-pointers=0 -loop-reduce -S < %s | FileCheck %s
+; RUN: opt -loop-reduce -S < %s | FileCheck %s
 ;
 ; Test LSR's intelligence regarding phi reuse.
 ; Verify that scaled GEPs are not reused. rdar://5064068
@@ -10,34 +10,28 @@ target triple = "x86_64-apple-darwin"
 target datalayout = "n8:16:32:64"
 
 
-define float @test(float* nocapture %A, float* nocapture %B, i32 %N, i32 %IA, i32 %IB) nounwind uwtable readonly ssp {
+define float @test(ptr nocapture %A, ptr nocapture %B, i32 %N, i32 %IA, i32 %IB) nounwind uwtable readonly ssp {
 ; CHECK-LABEL: define float @test
-; CHECK-SAME: (float* nocapture [[A:%.*]], float* nocapture [[B:%.*]], i32 [[N:%.*]], i32 [[IA:%.*]], i32 [[IB:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: (ptr nocapture [[A:%.*]], ptr nocapture [[B:%.*]], i32 [[N:%.*]], i32 [[IA:%.*]], i32 [[IB:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP1:%.*]] = icmp sgt i32 [[N]], 0
 ; CHECK-NEXT:    br i1 [[CMP1]], label [[WHILE_BODY_LR_PH:%.*]], label [[WHILE_END:%.*]]
 ; CHECK:       while.body.lr.ph:
 ; CHECK-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[IA]] to i64
 ; CHECK-NEXT:    [[IDX_EXT2:%.*]] = sext i32 [[IB]] to i64
-; CHECK-NEXT:    [[TMP0:%.*]] = shl nsw i64 [[IDX_EXT]], 2
-; CHECK-NEXT:    [[TMP1:%.*]] = shl nsw i64 [[IDX_EXT2]], 2
 ; CHECK-NEXT:    br label [[WHILE_BODY:%.*]]
 ; CHECK:       while.body:
-; CHECK-NEXT:    [[LSR_IV2:%.*]] = phi float* [ [[TMP5:%.*]], [[WHILE_BODY]] ], [ [[B]], [[WHILE_BODY_LR_PH]] ]
-; CHECK-NEXT:    [[LSR_IV:%.*]] = phi float* [ [[TMP4:%.*]], [[WHILE_BODY]] ], [ [[A]], [[WHILE_BODY_LR_PH]] ]
+; CHECK-NEXT:    [[A_ADDR_05:%.*]] = phi ptr [ [[A]], [[WHILE_BODY_LR_PH]] ], [ [[ADD_PTR:%.*]], [[WHILE_BODY]] ]
+; CHECK-NEXT:    [[B_ADDR_04:%.*]] = phi ptr [ [[B]], [[WHILE_BODY_LR_PH]] ], [ [[ADD_PTR3:%.*]], [[WHILE_BODY]] ]
 ; CHECK-NEXT:    [[N_ADDR_03:%.*]] = phi i32 [ [[N]], [[WHILE_BODY_LR_PH]] ], [ [[SUB:%.*]], [[WHILE_BODY]] ]
 ; CHECK-NEXT:    [[SUM0_02:%.*]] = phi float [ 0.000000e+00, [[WHILE_BODY_LR_PH]] ], [ [[ADD:%.*]], [[WHILE_BODY]] ]
-; CHECK-NEXT:    [[LSR_IV1:%.*]] = bitcast float* [[LSR_IV]] to i1*
-; CHECK-NEXT:    [[LSR_IV23:%.*]] = bitcast float* [[LSR_IV2]] to i1*
-; CHECK-NEXT:    [[TMP2:%.*]] = load float, float* [[LSR_IV]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = load float, float* [[LSR_IV2]], align 4
-; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[TMP2]], [[TMP3]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR_05]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[B_ADDR_04]], align 4
+; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[TMP0]], [[TMP1]]
 ; CHECK-NEXT:    [[ADD]] = fadd float [[SUM0_02]], [[MUL]]
+; CHECK-NEXT:    [[ADD_PTR]] = getelementptr inbounds float, ptr [[A_ADDR_05]], i64 [[IDX_EXT]]
+; CHECK-NEXT:    [[ADD_PTR3]] = getelementptr inbounds float, ptr [[B_ADDR_04]], i64 [[IDX_EXT2]]
 ; CHECK-NEXT:    [[SUB]] = add nsw i32 [[N_ADDR_03]], -1
-; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i1, i1* [[LSR_IV1]], i64 [[TMP0]]
-; CHECK-NEXT:    [[TMP4]] = bitcast i1* [[SCEVGEP]] to float*
-; CHECK-NEXT:    [[SCEVGEP4:%.*]] = getelementptr i1, i1* [[LSR_IV23]], i64 [[TMP1]]
-; CHECK-NEXT:    [[TMP5]] = bitcast i1* [[SCEVGEP4]] to float*
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[SUB]], 0
 ; CHECK-NEXT:    br i1 [[CMP]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT:%.*]]
 ; CHECK:       while.end.loopexit:
@@ -56,16 +50,16 @@ while.body.lr.ph:                                 ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.body
-  %A.addr.05 = phi float* [ %A, %while.body.lr.ph ], [ %add.ptr, %while.body ]
-  %B.addr.04 = phi float* [ %B, %while.body.lr.ph ], [ %add.ptr3, %while.body ]
+  %A.addr.05 = phi ptr [ %A, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %B.addr.04 = phi ptr [ %B, %while.body.lr.ph ], [ %add.ptr3, %while.body ]
   %N.addr.03 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
   %Sum0.02 = phi float [ 0.000000e+00, %while.body.lr.ph ], [ %add, %while.body ]
-  %0 = load float, float* %A.addr.05, align 4
-  %1 = load float, float* %B.addr.04, align 4
+  %0 = load float, ptr %A.addr.05, align 4
+  %1 = load float, ptr %B.addr.04, align 4
   %mul = fmul float %0, %1
   %add = fadd float %Sum0.02, %mul
-  %add.ptr = getelementptr inbounds float, float* %A.addr.05, i64 %idx.ext
-  %add.ptr3 = getelementptr inbounds float, float* %B.addr.04, i64 %idx.ext2
+  %add.ptr = getelementptr inbounds float, ptr %A.addr.05, i64 %idx.ext
+  %add.ptr3 = getelementptr inbounds float, ptr %B.addr.04, i64 %idx.ext2
   %sub = add nsw i32 %N.addr.03, -1
   %cmp = icmp sgt i32 %sub, 0
   br i1 %cmp, label %while.body, label %while.end

diff --git a/llvm/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll b/llvm/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll
index 4e4e81547f637..552cd88037321 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -opaque-pointers=0 -loop-reduce -S < %s | FileCheck %s
+; RUN: opt -loop-reduce -S < %s | FileCheck %s
 ;
 ; PR11571: handle a postinc user outside of for.body7 that requires
 ; recursive expansion of a quadratic recurrence within for.body7. LSR
@@ -16,23 +16,19 @@ define void @vb() nounwind {
 ; CHECK-NEXT:  for.cond.preheader:
 ; CHECK-NEXT:    br label [[FOR_BODY7:%.*]]
 ; CHECK:       for.body7:
-; CHECK-NEXT:    [[LSR_IV1:%.*]] = phi [121 x i32]* [ [[TMP0:%.*]], [[FOR_BODY7]] ], [ bitcast (i32* getelementptr inbounds ([121 x i32], [121 x i32]* @b, i32 0, i32 1) to [121 x i32]*), [[FOR_COND_PREHEADER:%.*]] ]
+; CHECK-NEXT:    [[LSR_IV1:%.*]] = phi ptr [ [[SCEVGEP:%.*]], [[FOR_BODY7]] ], [ getelementptr inbounds ([121 x i32], ptr @b, i32 0, i32 1), [[FOR_COND_PREHEADER:%.*]] ]
 ; CHECK-NEXT:    [[LSR_IV:%.*]] = phi i32 [ [[LSR_IV_NEXT:%.*]], [[FOR_BODY7]] ], [ 8, [[FOR_COND_PREHEADER]] ]
 ; CHECK-NEXT:    [[INDVARS_IV77:%.*]] = phi i32 [ [[INDVARS_IV_NEXT78:%.*]], [[FOR_BODY7]] ], [ 1, [[FOR_COND_PREHEADER]] ]
-; CHECK-NEXT:    [[LSR_IV12:%.*]] = bitcast [121 x i32]* [[LSR_IV1]] to i1*
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT78]] = add i32 [[INDVARS_IV77]], 1
 ; CHECK-NEXT:    [[LSR_IV_NEXT]] = add nuw nsw i32 [[LSR_IV]], 4
-; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i1, i1* [[LSR_IV12]], i32 [[LSR_IV]]
-; CHECK-NEXT:    [[TMP0]] = bitcast i1* [[SCEVGEP]] to [121 x i32]*
+; CHECK-NEXT:    [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV1]], i32 [[LSR_IV]]
 ; CHECK-NEXT:    br i1 true, label [[FOR_BODY43_PREHEADER:%.*]], label [[FOR_BODY7]]
 ; CHECK:       for.body43.preheader:
 ; CHECK-NEXT:    br label [[FOR_BODY43:%.*]]
 ; CHECK:       for.body43:
-; CHECK-NEXT:    [[LSR_IV3:%.*]] = phi [121 x i32]* [ [[LSR_IV1]], [[FOR_BODY43_PREHEADER]] ], [ [[TMP1:%.*]], [[FOR_BODY43]] ]
-; CHECK-NEXT:    [[LSR_IV35:%.*]] = bitcast [121 x i32]* [[LSR_IV3]] to i32*
-; CHECK-NEXT:    [[T2:%.*]] = load i32, i32* [[LSR_IV35]], align 4
-; CHECK-NEXT:    [[SCEVGEP4:%.*]] = getelementptr [121 x i32], [121 x i32]* [[LSR_IV3]], i32 0, i32 1
-; CHECK-NEXT:    [[TMP1]] = bitcast i32* [[SCEVGEP4]] to [121 x i32]*
+; CHECK-NEXT:    [[LSR_IV2:%.*]] = phi ptr [ [[LSR_IV1]], [[FOR_BODY43_PREHEADER]] ], [ [[SCEVGEP3:%.*]], [[FOR_BODY43]] ]
+; CHECK-NEXT:    [[T2:%.*]] = load i32, ptr [[LSR_IV2]], align 4
+; CHECK-NEXT:    [[SCEVGEP3]] = getelementptr i8, ptr [[LSR_IV2]], i32 4
 ; CHECK-NEXT:    br label [[FOR_BODY43]]
 ;
 for.cond.preheader:
@@ -48,8 +44,8 @@ for.body7:
 for.body43:
   %bf.459 = phi i32 [ %inc44, %for.body43 ], [ %t1, %for.body7 ]
   %inc44 = add nsw i32 %bf.459, 1
-  %arrayidx45 = getelementptr inbounds [121 x i32], [121 x i32]* @b, i32 0, i32 %bf.459
-  %t2 = load i32, i32* %arrayidx45, align 4
+  %arrayidx45 = getelementptr inbounds [121 x i32], ptr @b, i32 0, i32 %bf.459
+  %t2 = load i32, ptr %arrayidx45, align 4
   br label %for.body43
 }
 

diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll
index 005beed5e7b84..753aaed7d77c0 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: opt -opaque-pointers=0 < %s -loop-reduce -mtriple=x86_64  -S | FileCheck %s -check-prefix=INSN
-; RUN: opt -opaque-pointers=0 < %s -loop-reduce -mtriple=x86_64 -lsr-insns-cost=false -S | FileCheck %s -check-prefix=REGS
-; RUN: llc -opaque-pointers=0 < %s -O2 -mtriple=x86_64-unknown-unknown -lsr-insns-cost | FileCheck %s
+; RUN: opt < %s -loop-reduce -mtriple=x86_64  -S | FileCheck %s -check-prefix=INSN
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -lsr-insns-cost=false -S | FileCheck %s -check-prefix=REGS
+; RUN: llc < %s -O2 -mtriple=x86_64-unknown-unknown -lsr-insns-cost | FileCheck %s
 
 ; OPT test checks that LSR optimize compare for static counter to compare with 0.
 
@@ -19,30 +19,24 @@
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 
-define void @foo(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* nocapture %q) {
+define void @foo(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr nocapture %q) {
 ; INSN-LABEL: @foo(
 ; INSN-NEXT:  entry:
-; INSN-NEXT:    [[Q1:%.*]] = bitcast i32* [[Q:%.*]] to i8*
-; INSN-NEXT:    [[Y3:%.*]] = bitcast i32* [[Y:%.*]] to i8*
-; INSN-NEXT:    [[X7:%.*]] = bitcast i32* [[X:%.*]] to i8*
 ; INSN-NEXT:    br label [[FOR_BODY:%.*]]
 ; INSN:       for.cond.cleanup:
 ; INSN-NEXT:    ret void
 ; INSN:       for.body:
 ; INSN-NEXT:    [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], [[FOR_BODY]] ], [ -4096, [[ENTRY:%.*]] ]
-; INSN-NEXT:    [[UGLYGEP8:%.*]] = getelementptr i8, i8* [[X7]], i64 [[LSR_IV]]
-; INSN-NEXT:    [[UGLYGEP89:%.*]] = bitcast i8* [[UGLYGEP8]] to i32*
-; INSN-NEXT:    [[SCEVGEP10:%.*]] = getelementptr i32, i32* [[UGLYGEP89]], i64 1024
-; INSN-NEXT:    [[TMP:%.*]] = load i32, i32* [[SCEVGEP10]], align 4
-; INSN-NEXT:    [[UGLYGEP4:%.*]] = getelementptr i8, i8* [[Y3]], i64 [[LSR_IV]]
-; INSN-NEXT:    [[UGLYGEP45:%.*]] = bitcast i8* [[UGLYGEP4]] to i32*
-; INSN-NEXT:    [[SCEVGEP6:%.*]] = getelementptr i32, i32* [[UGLYGEP45]], i64 1024
-; INSN-NEXT:    [[TMP1:%.*]] = load i32, i32* [[SCEVGEP6]], align 4
+; INSN-NEXT:    [[SCEVGEP4:%.*]] = getelementptr i8, ptr [[X:%.*]], i64 [[LSR_IV]]
+; INSN-NEXT:    [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[SCEVGEP4]], i64 4096
+; INSN-NEXT:    [[TMP:%.*]] = load i32, ptr [[SCEVGEP5]], align 4
+; INSN-NEXT:    [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[Y:%.*]], i64 [[LSR_IV]]
+; INSN-NEXT:    [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[SCEVGEP2]], i64 4096
+; INSN-NEXT:    [[TMP1:%.*]] = load i32, ptr [[SCEVGEP3]], align 4
 ; INSN-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP]]
-; INSN-NEXT:    [[UGLYGEP:%.*]] = getelementptr i8, i8* [[Q1]], i64 [[LSR_IV]]
-; INSN-NEXT:    [[UGLYGEP2:%.*]] = bitcast i8* [[UGLYGEP]] to i32*
-; INSN-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, i32* [[UGLYGEP2]], i64 1024
-; INSN-NEXT:    store i32 [[ADD]], i32* [[SCEVGEP]], align 4
+; INSN-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, ptr [[Q:%.*]], i64 [[LSR_IV]]
+; INSN-NEXT:    [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 4096
+; INSN-NEXT:    store i32 [[ADD]], ptr [[SCEVGEP1]], align 4
 ; INSN-NEXT:    [[LSR_IV_NEXT]] = add nsw i64 [[LSR_IV]], 4
 ; INSN-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[LSR_IV_NEXT]], 0
 ; INSN-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
@@ -54,13 +48,16 @@ define void @foo(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* no
 ; REGS-NEXT:    ret void
 ; REGS:       for.body:
 ; REGS-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; REGS-NEXT:    [[SCEVGEP2:%.*]] = getelementptr i32, i32* [[X:%.*]], i64 [[INDVARS_IV]]
-; REGS-NEXT:    [[TMP:%.*]] = load i32, i32* [[SCEVGEP2]], align 4
-; REGS-NEXT:    [[SCEVGEP1:%.*]] = getelementptr i32, i32* [[Y:%.*]], i64 [[INDVARS_IV]]
-; REGS-NEXT:    [[TMP1:%.*]] = load i32, i32* [[SCEVGEP1]], align 4
+; REGS-NEXT:    [[TMP0:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 2
+; REGS-NEXT:    [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[X:%.*]], i64 [[TMP0]]
+; REGS-NEXT:    [[TMP:%.*]] = load i32, ptr [[SCEVGEP2]], align 4
+; REGS-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 2
+; REGS-NEXT:    [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[Y:%.*]], i64 [[TMP1]]
+; REGS-NEXT:    [[TMP1:%.*]] = load i32, ptr [[SCEVGEP1]], align 4
 ; REGS-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP]]
-; REGS-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, i32* [[Q:%.*]], i64 [[INDVARS_IV]]
-; REGS-NEXT:    store i32 [[ADD]], i32* [[SCEVGEP]], align 4
+; REGS-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 2
+; REGS-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, ptr [[Q:%.*]], i64 [[TMP2]]
+; REGS-NEXT:    store i32 [[ADD]], ptr [[SCEVGEP]], align 4
 ; REGS-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; REGS-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
 ; REGS-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
@@ -86,13 +83,13 @@ for.cond.cleanup:                                 ; preds = %for.body
 
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
-  %tmp = load i32, i32* %arrayidx, align 4
-  %arrayidx2 = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
-  %tmp1 = load i32, i32* %arrayidx2, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %x, i64 %indvars.iv
+  %tmp = load i32, ptr %arrayidx, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %y, i64 %indvars.iv
+  %tmp1 = load i32, ptr %arrayidx2, align 4
   %add = add nsw i32 %tmp1, %tmp
-  %arrayidx4 = getelementptr inbounds i32, i32* %q, i64 %indvars.iv
-  store i32 %add, i32* %arrayidx4, align 4
+  %arrayidx4 = getelementptr inbounds i32, ptr %q, i64 %indvars.iv
+  store i32 %add, ptr %arrayidx4, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, 1024
   br i1 %exitcond, label %for.cond.cleanup, label %for.body

diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll
index a663a7fa65380..3de340473c67f 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll
@@ -1,21 +1,24 @@
-; RUN: opt -opaque-pointers=0 < %s -loop-reduce -mtriple=x86_64-- -S | FileCheck %s -check-prefix=BOTH -check-prefix=INSN
-; RUN: opt -opaque-pointers=0 < %s -loop-reduce -mtriple=x86_64-- -lsr-insns-cost=false -S | FileCheck %s -check-prefix=BOTH -check-prefix=REGS
-; RUN: llc -opaque-pointers=0 < %s -O2 -mtriple=x86_64-- -lsr-insns-cost -asm-verbose=0 | FileCheck %s
+; RUN: opt < %s -loop-reduce -mtriple=x86_64-- -S | FileCheck %s -check-prefix=BOTH -check-prefix=INSN
+; RUN: opt < %s -loop-reduce -mtriple=x86_64-- -lsr-insns-cost=false -S | FileCheck %s -check-prefix=BOTH -check-prefix=REGS
+; RUN: llc < %s -O2 -mtriple=x86_64-- -lsr-insns-cost -asm-verbose=0 | FileCheck %s
 
 ; OPT checks that LSR prefers less instructions to less registers.
 ; For x86 LSR should prefer complicated address to new lsr induction
 ; variables.
 
 ; BOTH: for.body:
-; INSN:   getelementptr i32, i32* %x, i64 %indvars.iv
-; INSN:   getelementptr i32, i32* %y, i64 %indvars.iv
-; INSN:   getelementptr i32, i32* %q, i64 %indvars.iv
+; INSN:   [[OFFSET1:%.+]] = shl nuw nsw i64 %indvars.iv, 2
+; INSN:   getelementptr i8, ptr %x, i64 [[OFFSET1]]
+; INSN:   [[OFFSET2:%.+]] = shl nuw nsw i64 %indvars.iv, 2
+; INSN:   getelementptr i8, ptr %y, i64 [[OFFSET2]]
+; INSN:   [[OFFSET3:%.+]] = shl nuw nsw i64 %indvars.iv, 2
+; INSN:   getelementptr i8, ptr %q, i64 [[OFFSET3]]
 ; REGS:   %lsr.iv4 = phi
 ; REGS:   %lsr.iv2 = phi
 ; REGS:   %lsr.iv1 = phi
-; REGS:   getelementptr i32, i32* %lsr.iv1, i64 1
-; REGS:   getelementptr i32, i32* %lsr.iv2, i64 1
-; REGS:   getelementptr i32, i32* %lsr.iv4, i64 1
+; REGS:   getelementptr i8, ptr %lsr.iv1, i64 4
+; REGS:   getelementptr i8, ptr %lsr.iv2, i64 4
+; REGS:   getelementptr i8, ptr %lsr.iv4, i64 4
 
 ; LLC checks that LSR prefers less instructions to less registers.
 ; LSR should prefer complicated address to additonal add instructions.
@@ -28,7 +31,7 @@
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 
 ; Function Attrs: norecurse nounwind uwtable
-define void @foo(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* nocapture %q, i32 %n) {
+define void @foo(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr nocapture %q, i32 %n) {
 entry:
   %cmp10 = icmp sgt i32 %n, 0
   br i1 %cmp10, label %for.body.preheader, label %for.cond.cleanup
@@ -45,13 +48,13 @@ for.cond.cleanup:                                 ; preds = %for.cond.cleanup.lo
 
 for.body:                                         ; preds = %for.body, %for.body.preheader
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
-  %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
-  %tmp = load i32, i32* %arrayidx, align 4
-  %arrayidx2 = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
-  %tmp1 = load i32, i32* %arrayidx2, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %x, i64 %indvars.iv
+  %tmp = load i32, ptr %arrayidx, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %y, i64 %indvars.iv
+  %tmp1 = load i32, ptr %arrayidx2, align 4
   %add = add nsw i32 %tmp1, %tmp
-  %arrayidx4 = getelementptr inbounds i32, i32* %q, i64 %indvars.iv
-  store i32 %add, i32* %arrayidx4, align 4
+  %arrayidx4 = getelementptr inbounds i32, ptr %q, i64 %indvars.iv
+  store i32 %add, ptr %arrayidx4, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
   br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body

diff --git a/llvm/test/Transforms/LoopStrengthReduce/ivchain.ll b/llvm/test/Transforms/LoopStrengthReduce/ivchain.ll
index b8ddf72b90b96..49a06f0aee371 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/ivchain.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/ivchain.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -opaque-pointers=0 < %s -loop-reduce -S | FileCheck %s
-; RUN: opt -opaque-pointers=0 -passes='require<scalar-evolution>,require<targetir>,loop(loop-reduce)' < %s -S | FileCheck %s
+; RUN: opt < %s -loop-reduce -S | FileCheck %s
+; RUN: opt -passes='require<scalar-evolution>,require<targetir>,loop(loop-reduce)' < %s -S | FileCheck %s
 ;
 ; PR11782: bad cast to AddRecExpr.
 ; A sign extend feeds an IVUser and cannot be hoisted into the AddRec.
@@ -10,30 +10,29 @@
 ; Provide legal integer types.
 target datalayout = "n8:16:32:64"
 
-%struct = type { i8*, i8*, i16, i64, i16, i16, i16, i64, i64, i16, i8*, i64, i64, i64 }
+%struct = type { ptr, ptr, i16, i64, i16, i16, i16, i64, i64, i16, ptr, i64, i64, i64 }
 
-define i32 @test(i8* %h, i32 %more) nounwind uwtable {
+define i32 @test(ptr %h, i32 %more) nounwind uwtable {
 ; CHECK-LABEL: define i32 @test
-; CHECK-SAME: (i8* [[H:%.*]], i32 [[MORE:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: (ptr [[H:%.*]], i32 [[MORE:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 undef, label [[LAND_END238:%.*]], label [[RETURN:%.*]]
 ; CHECK:       land.end238:
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[LSR_IV:%.*]] = phi %struct* [ [[SCEVGEP:%.*]], [[SW_EPILOG:%.*]] ], [ undef, [[LAND_END238]] ]
+; CHECK-NEXT:    [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], [[SW_EPILOG:%.*]] ], [ undef, [[LAND_END238]] ]
 ; CHECK-NEXT:    [[COLUMN_N_0:%.*]] = phi i16 [ 0, [[LAND_END238]] ], [ [[INC601:%.*]], [[SW_EPILOG]] ]
-; CHECK-NEXT:    [[LSR_IV1:%.*]] = bitcast %struct* [[LSR_IV]] to i64*
 ; CHECK-NEXT:    [[CONV250:%.*]] = sext i16 [[COLUMN_N_0]] to i32
 ; CHECK-NEXT:    [[ADD257:%.*]] = add nsw i32 [[CONV250]], 1
 ; CHECK-NEXT:    [[CONV258:%.*]] = trunc i32 [[ADD257]] to i16
 ; CHECK-NEXT:    [[CMP263:%.*]] = icmp ult i16 undef, 2
 ; CHECK-NEXT:    br label [[IF_END388:%.*]]
 ; CHECK:       if.end388:
-; CHECK-NEXT:    [[CALL405:%.*]] = call signext i16 @SQLColAttribute(i8* undef, i16 zeroext [[CONV258]], i16 zeroext 1003, i8* null, i16 signext 0, i16* null, i64* [[LSR_IV1]]) #[[ATTR1:[0-9]+]]
+; CHECK-NEXT:    [[CALL405:%.*]] = call signext i16 @SQLColAttribute(ptr undef, i16 zeroext [[CONV258]], i16 zeroext 1003, ptr null, i16 signext 0, ptr null, ptr [[LSR_IV]]) #[[ATTR1:[0-9]+]]
 ; CHECK-NEXT:    br label [[SW_EPILOG]]
 ; CHECK:       sw.epilog:
 ; CHECK-NEXT:    [[INC601]] = add i16 [[COLUMN_N_0]], 1
-; CHECK-NEXT:    [[SCEVGEP]] = getelementptr [[STRUCT:%.*]], %struct* [[LSR_IV]], i64 1
+; CHECK-NEXT:    [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV]], i64 88
 ; CHECK-NEXT:    br label [[FOR_BODY]]
 ; CHECK:       return:
 ; CHECK-NEXT:    ret i32 1
@@ -45,7 +44,7 @@ land.end238:                                      ; preds = %if.end229
   br label %for.body
 
 for.body:                                         ; preds = %sw.epilog, %land.end238
-  %fbh.0 = phi %struct* [ undef, %land.end238 ], [ %incdec.ptr, %sw.epilog ]
+  %fbh.0 = phi ptr [ undef, %land.end238 ], [ %incdec.ptr, %sw.epilog ]
   %column_n.0 = phi i16 [ 0, %land.end238 ], [ %inc601, %sw.epilog ]
   %conv250 = sext i16 %column_n.0 to i32
   %add257 = add nsw i32 %conv250, 1
@@ -54,17 +53,17 @@ for.body:                                         ; preds = %sw.epilog, %land.en
   br label %if.end388
 
 if.end388:                                        ; preds = %if.then380, %if.else356
-  %ColLength = getelementptr inbounds %struct, %struct* %fbh.0, i64 0, i32 7
-  %call405 = call signext i16 @SQLColAttribute(i8* undef, i16 zeroext %conv258, i16 zeroext 1003, i8* null, i16 signext 0, i16* null, i64* %ColLength) nounwind
+  %ColLength = getelementptr inbounds %struct, ptr %fbh.0, i64 0, i32 7
+  %call405 = call signext i16 @SQLColAttribute(ptr undef, i16 zeroext %conv258, i16 zeroext 1003, ptr null, i16 signext 0, ptr null, ptr %ColLength) nounwind
   br label %sw.epilog
 
 sw.epilog:                                        ; preds = %sw.bb542, %sw.bb523, %if.end475
   %inc601 = add i16 %column_n.0, 1
-  %incdec.ptr = getelementptr inbounds %struct, %struct* %fbh.0, i64 1
+  %incdec.ptr = getelementptr inbounds %struct, ptr %fbh.0, i64 1
   br label %for.body
 
 return:                                           ; preds = %entry
   ret i32 1
 }
 
-declare signext i16 @SQLColAttribute(i8*, i16 zeroext, i16 zeroext, i8*, i16 signext, i16*, i64*)
+declare signext i16 @SQLColAttribute(ptr, i16 zeroext, i16 zeroext, ptr, i16 signext, ptr, ptr)

diff --git a/llvm/test/Transforms/LoopStrengthReduce/nonintegral.ll b/llvm/test/Transforms/LoopStrengthReduce/nonintegral.ll
index ff983652dc096..1c29331a9ac38 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/nonintegral.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/nonintegral.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -opaque-pointers=0 -S -loop-reduce < %s | FileCheck %s
+; RUN: opt -S -loop-reduce < %s | FileCheck %s
 
 ; Address Space 10 is non-integral. The optimizer is not allowed to use
 ; ptrtoint/inttoptr instructions. Make sure that this doesn't happen
@@ -9,9 +9,9 @@ target triple = "x86_64-unknown-linux-gnu"
 ; How exactly SCEV chooses to materialize isn't all that important, as
 ; long as it doesn't try to round-trip through integers. As of this writing,
 ; it emits a byte-wise gep, which is fine.
-define void @japi1__unsafe_getindex_65028(i64 addrspace(10)* %arg) {
+define void @japi1__unsafe_getindex_65028(ptr addrspace(10) %arg) {
 ; CHECK-LABEL: define void @japi1__unsafe_getindex_65028
-; CHECK-SAME: (i64 addrspace(10)* [[ARG:%.*]]) {
+; CHECK-SAME: (ptr addrspace(10) [[ARG:%.*]]) {
 ; CHECK-NEXT:  top:
 ; CHECK-NEXT:    br label [[L86:%.*]]
 ; CHECK:       L86:
@@ -19,22 +19,23 @@ define void @japi1__unsafe_getindex_65028(i64 addrspace(10)* %arg) {
 ; CHECK-NEXT:    [[LSR_IV_NEXT5]] = add nsw i64 [[LSR_IV4]], 2
 ; CHECK-NEXT:    br i1 false, label [[L86]], label [[IF29:%.*]]
 ; CHECK:       if29:
-; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i64, i64 addrspace(10)* [[ARG]], i64 -1
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, ptr addrspace(10) [[ARG]], i64 -8
 ; CHECK-NEXT:    br label [[IF31:%.*]]
 ; CHECK:       if31:
 ; CHECK-NEXT:    %"#temp#1.sroa.0.022" = phi i64 [ 0, [[IF29]] ], [ [[TMP3_LCSSA:%.*]], [[IF38:%.*]] ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[LSR_IV_NEXT5]], %"#temp#1.sroa.0.022"
-; CHECK-NEXT:    [[SCEVGEP1:%.*]] = getelementptr i64, i64 addrspace(10)* [[SCEVGEP]], i64 [[TMP0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
+; CHECK-NEXT:    [[SCEVGEP1:%.*]] = getelementptr i8, ptr addrspace(10) [[SCEVGEP]], i64 [[TMP1]]
 ; CHECK-NEXT:    br label [[L119:%.*]]
 ; CHECK:       L119:
-; CHECK-NEXT:    [[LSR_IV2:%.*]] = phi i64 addrspace(10)* [ [[SCEVGEP3:%.*]], [[L119]] ], [ [[SCEVGEP1]], [[IF31]] ]
+; CHECK-NEXT:    [[LSR_IV2:%.*]] = phi ptr addrspace(10) [ [[SCEVGEP3:%.*]], [[L119]] ], [ [[SCEVGEP1]], [[IF31]] ]
 ; CHECK-NEXT:    [[I5_0:%.*]] = phi i64 [ %"#temp#1.sroa.0.022", [[IF31]] ], [ [[TMP3:%.*]], [[L119]] ]
 ; CHECK-NEXT:    [[TMP3]] = add i64 [[I5_0]], 1
-; CHECK-NEXT:    [[SCEVGEP3]] = getelementptr i64, i64 addrspace(10)* [[LSR_IV2]], i64 1
+; CHECK-NEXT:    [[SCEVGEP3]] = getelementptr i8, ptr addrspace(10) [[LSR_IV2]], i64 8
 ; CHECK-NEXT:    br i1 false, label [[L119]], label [[IF38]]
 ; CHECK:       if38:
 ; CHECK-NEXT:    [[TMP3_LCSSA]] = phi i64 [ [[TMP3]], [[L119]] ]
-; CHECK-NEXT:    [[TMP6:%.*]] = load i64, i64 addrspace(10)* [[SCEVGEP3]], align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i64, ptr addrspace(10) [[SCEVGEP3]], align 8
 ; CHECK-NEXT:    br i1 true, label [[DONE:%.*]], label [[IF31]]
 ; CHECK:       done:
 ; CHECK-NEXT:    ret void
@@ -63,8 +64,8 @@ L119:                                             ; preds = %L119, %if31
 
 if38:                                             ; preds = %L119
   %tmp4 = add i64 %tmp2, %i5.0
-  %tmp5 = getelementptr i64, i64 addrspace(10)* %arg, i64 %tmp4
-  %tmp6 = load i64, i64 addrspace(10)* %tmp5
+  %tmp5 = getelementptr i64, ptr addrspace(10) %arg, i64 %tmp4
+  %tmp6 = load i64, ptr addrspace(10) %tmp5
   br i1 undef, label %done, label %if31
 
 done:                                             ; preds = %if38

diff --git a/llvm/test/Transforms/LoopStrengthReduce/shl.ll b/llvm/test/Transforms/LoopStrengthReduce/shl.ll
index 4a2136c484f20..2a341e96d0cad 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/shl.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/shl.ll
@@ -1,12 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -opaque-pointers=0 < %s -loop-reduce -gvn -S | FileCheck %s
+; RUN: opt < %s -loop-reduce -gvn -S | FileCheck %s
 
 target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
 
 ; LoopStrengthReduce should reuse %mul as the stride.
-define void @_Z3fooPfll(float* nocapture readonly %input, i64 %n, i64 %s) {
+define void @_Z3fooPfll(ptr nocapture readonly %input, i64 %n, i64 %s) {
 ; CHECK-LABEL: define void @_Z3fooPfll
-; CHECK-SAME: (float* nocapture readonly [[INPUT:%.*]], i64 [[N:%.*]], i64 [[S:%.*]]) {
+; CHECK-SAME: (ptr nocapture readonly [[INPUT:%.*]], i64 [[N:%.*]], i64 [[S:%.*]]) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[MUL:%.*]] = shl i64 [[S]], 2
 ; CHECK-NEXT:    tail call void @_Z3bazl(i64 [[MUL]])
@@ -19,14 +19,12 @@ define void @_Z3fooPfll(float* nocapture readonly %input, i64 %n, i64 %s) {
 ; CHECK:       for.cond.cleanup:
 ; CHECK-NEXT:    ret void
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[LSR_IV:%.*]] = phi float* [ [[TMP1:%.*]], [[FOR_BODY]] ], [ [[INPUT]], [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT:    [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], [[FOR_BODY]] ], [ [[INPUT]], [[FOR_BODY_PREHEADER]] ]
 ; CHECK-NEXT:    [[I_06:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT:    [[LSR_IV1:%.*]] = bitcast float* [[LSR_IV]] to i1*
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[LSR_IV]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[LSR_IV]], align 4
 ; CHECK-NEXT:    tail call void @_Z3barf(float [[TMP0]])
 ; CHECK-NEXT:    [[ADD]] = add i64 [[I_06]], [[S]]
-; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i1, i1* [[LSR_IV1]], i64 [[MUL]]
-; CHECK-NEXT:    [[TMP1]] = bitcast i1* [[SCEVGEP]] to float*
+; CHECK-NEXT:    [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV]], i64 [[MUL]]
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[ADD]], [[N]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
 ;
@@ -47,8 +45,8 @@ for.cond.cleanup:                                 ; preds = %for.cond.cleanup.lo
 
 for.body:                                         ; preds = %for.body.preheader, %for.body
   %i.06 = phi i64 [ %add, %for.body ], [ 0, %for.body.preheader ]
-  %arrayidx = getelementptr inbounds float, float* %input, i64 %i.06
-  %0 = load float, float* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds float, ptr %input, i64 %i.06
+  %0 = load float, ptr %arrayidx, align 4
   tail call void @_Z3barf(float %0) #2
   %add = add nsw i64 %i.06, %s
   %cmp = icmp slt i64 %add, %n

diff --git a/llvm/test/Transforms/LoopStrengthReduce/uglygep-address-space.ll b/llvm/test/Transforms/LoopStrengthReduce/uglygep-address-space.ll
index 2f4a11079d51a..1fd840ae2c015 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/uglygep-address-space.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/uglygep-address-space.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -opaque-pointers=0 < %s -loop-reduce -S | FileCheck %s
+; RUN: opt < %s -loop-reduce -S | FileCheck %s
 
 ; LSR shouldn't consider %t8 to be an interesting user of %t6, and it
 ; should be able to form pretty GEPs.
@@ -9,9 +9,9 @@ target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-
 ; Copy of uglygep with a different address space
 ; This tests expandAddToGEP uses the right smaller integer type for
 ; another address space
-define void @Z4(i8 addrspace(1)* %ptr.i8, float addrspace(1)* addrspace(1)* %ptr.float) {
+define void @Z4(ptr addrspace(1) %ptr.i8, ptr addrspace(1) %ptr.float) {
 ; CHECK-LABEL: define void @Z4
-; CHECK-SAME: (i8 addrspace(1)* [[PTR_I8:%.*]], float addrspace(1)* addrspace(1)* [[PTR_FLOAT:%.*]]) {
+; CHECK-SAME: (ptr addrspace(1) [[PTR_I8:%.*]], ptr addrspace(1) [[PTR_FLOAT:%.*]]) {
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb1:
@@ -24,15 +24,14 @@ define void @Z4(i8 addrspace(1)* %ptr.i8, float addrspace(1)* addrspace(1)* %ptr
 ; CHECK-NEXT:    br label [[BB1:%.*]]
 ; CHECK:       bb10:
 ; CHECK-NEXT:    [[T7:%.*]] = icmp eq i16 [[T4]], 0
-; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, i8 addrspace(1)* [[PTR_I8]], i16 [[T4]]
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, ptr addrspace(1) [[PTR_I8]], i16 [[T4]]
 ; CHECK-NEXT:    br label [[BB14:%.*]]
 ; CHECK:       bb14:
-; CHECK-NEXT:    store i8 undef, i8 addrspace(1)* [[SCEVGEP]], align 1
-; CHECK-NEXT:    [[T6:%.*]] = load float addrspace(1)*, float addrspace(1)* addrspace(1)* [[PTR_FLOAT]], align 2
-; CHECK-NEXT:    [[SCEVGEP1:%.*]] = getelementptr float, float addrspace(1)* [[T6]], i16 4
-; CHECK-NEXT:    [[SCEVGEP12:%.*]] = bitcast float addrspace(1)* [[SCEVGEP1]] to i8 addrspace(1)*
-; CHECK-NEXT:    [[SCEVGEP3:%.*]] = getelementptr i8, i8 addrspace(1)* [[SCEVGEP12]], i16 [[T4]]
-; CHECK-NEXT:    store i8 undef, i8 addrspace(1)* [[SCEVGEP3]], align 1
+; CHECK-NEXT:    store i8 undef, ptr addrspace(1) [[SCEVGEP]], align 1
+; CHECK-NEXT:    [[T6:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[PTR_FLOAT]], align 2
+; CHECK-NEXT:    [[SCEVGEP1:%.*]] = getelementptr i8, ptr addrspace(1) [[T6]], i16 16
+; CHECK-NEXT:    [[SCEVGEP2:%.*]] = getelementptr i8, ptr addrspace(1) [[SCEVGEP1]], i16 [[T4]]
+; CHECK-NEXT:    store i8 undef, ptr addrspace(1) [[SCEVGEP2]], align 1
 ; CHECK-NEXT:    br label [[BB14]]
 ;
 bb:
@@ -55,12 +54,11 @@ bb10:                                             ; preds = %bb9
   br label %bb14
 
 bb14:                                             ; preds = %bb14, %bb10
-  %t2 = getelementptr inbounds i8, i8 addrspace(1)* %ptr.i8, i16 %t4 ; <i8*> [#uses=1]
-  store i8 undef, i8 addrspace(1)* %t2
-  %t6 = load float addrspace(1)*, float addrspace(1)* addrspace(1)* %ptr.float
-  %t8 = bitcast float addrspace(1)* %t6 to i8 addrspace(1)*              ; <i8*> [#uses=1]
-  %t9 = getelementptr inbounds i8, i8 addrspace(1)* %t8, i16 %t3 ; <i8*> [#uses=1]
-  store i8 undef, i8 addrspace(1)* %t9
+  %t2 = getelementptr inbounds i8, ptr addrspace(1) %ptr.i8, i16 %t4 ; <ptr> [#uses=1]
+  store i8 undef, ptr addrspace(1) %t2
+  %t6 = load ptr addrspace(1), ptr addrspace(1) %ptr.float
+  %t9 = getelementptr inbounds i8, ptr addrspace(1) %t6, i16 %t3 ; <ptr> [#uses=1]
+  store i8 undef, ptr addrspace(1) %t9
   br label %bb14
 }
 


        


More information about the llvm-commits mailing list