Index: test/Other/AddressSpace/scev-constant-fold-gep-0.ll =================================================================== --- test/Other/AddressSpace/scev-constant-fold-gep-0.ll (revision 0) +++ test/Other/AddressSpace/scev-constant-fold-gep-0.ll (working copy) @@ -0,0 +1,140 @@ +; "SCEV" - ScalarEvolution. +; RUN: opt -analyze -scalar-evolution < %s | FileCheck --check-prefix=SCEV %s +target datalayout = "e-p:128:128:128-p1:32:32:32-p2:8:8:8-p3:16:16:16-p4:64:64:64-p5:96:96:96-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32" +; SCEV: Classifying expressions for: @goo8 +; SCEV: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)* +; SCEV: --> ((-1 * sizeof(i8)) + inttoptr (i32 1 to i8 addrspace(1)*)) +; SCEV: Classifying expressions for: @goo1 +; SCEV: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)* +; SCEV: --> ((-1 * sizeof(i1)) + inttoptr (i32 1 to i1 addrspace(2)*)) +; SCEV: Classifying expressions for: @foo8 +; SCEV: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) to i8 addrspace(1)* +; SCEV: --> ((-2 * sizeof(i8)) + inttoptr (i32 1 to i8 addrspace(1)*)) +; SCEV: Classifying expressions for: @foo1 +; SCEV: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -2) to i1 addrspace(2)* +; SCEV: --> ((-2 * sizeof(i1)) + inttoptr (i32 1 to i1 addrspace(2)*)) +; SCEV: Classifying expressions for: @hoo8 +; SCEV: --> (-1 * sizeof(i8)) +; SCEV: Classifying expressions for: @hoo1 +; SCEV: --> (-1 * sizeof(i1)) + +define i8 addrspace(1)* @goo8() nounwind { + %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)* + ret i8 addrspace(1)* %t +} +define i1 addrspace(2)* @goo1() nounwind { + %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)* + ret i1 addrspace(2)* %t +} +define i8 addrspace(1)* @foo8() nounwind { + %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) to i8 addrspace(1)* + ret i8 addrspace(1)* %t +} +define i1 addrspace(2)* @foo1() nounwind { + %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -2) to i1 addrspace(2)* + ret i1 addrspace(2)* %t +} +define i8 addrspace(1)* @hoo8() nounwind { + %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 0 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)* + ret i8 addrspace(1)* %t +} +define i1 addrspace(2)* @hoo1() nounwind { + %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 0 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)* + ret i1 addrspace(2)* %t +} +; SCEV: Classifying expressions for: @fa +; SCEV: %t = bitcast i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2310) to i64 +; SCEV: --> (2310 * sizeof(double)) +; SCEV: Classifying expressions for: @fb +; SCEV: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64 +; SCEV: --> alignof(double) +; SCEV: Classifying expressions for: @fc +; SCEV: %t = bitcast i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2) to i64 +; SCEV: --> (2 * sizeof(double)) +; 
SCEV: Classifying expressions for: @fd +; SCEV: %t = bitcast i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 11) to i64 +; SCEV: --> (11 * sizeof(double)) +; SCEV: Classifying expressions for: @fe +; SCEV: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) to i64 +; SCEV: --> offsetof({ double, float, double, double }, 2) +; SCEV: Classifying expressions for: @ff +; SCEV: %t = bitcast i64 1 to i64 +; SCEV: --> 1 +; SCEV: Classifying expressions for: @fg +; SCEV: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64 +; SCEV: --> alignof(double) +; SCEV: Classifying expressions for: @fh +; SCEV: %t = bitcast i64 ptrtoint (i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 1) to i64) to i64 +; SCEV: --> sizeof(i1 addrspace(2)*) +; SCEV: Classifying expressions for: @fi +; SCEV: %t = bitcast i64 ptrtoint (i1 addrspace(2)* getelementptr ({ i1, i1 addrspace(2)* }* null, i64 0, i32 1) to i64) to i64 +; SCEV: --> alignof(i1 addrspace(2)*) + +define i64 @fa() nounwind { + %t = bitcast i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]}* getelementptr ({[7 x double], [7 x double]}* null, i64 11) to i64), i64 5)) to i64 + ret i64 %t +} +define i64 @fb() nounwind { + %t = bitcast i64 ptrtoint ([13 x double] addrspace(4)* getelementptr ({i1, [13 x double]} addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fc() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({double, double, double, double} addrspace(4)* null, i64 0, i32 2) to i64) to i64 + ret i64 %t +} +define i64 @fd() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ([13 x double] addrspace(4)* null, i64 0, i32 11) to i64) to i64 + ret i64 %t +} +define i64 @fe() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({double, float, double, double} addrspace(4)* null, i64 0, i32 2) to i64) to i64 + ret i64 %t +} +define i64 @ff() nounwind { + %t = bitcast i64 ptrtoint (<{ i16, i128 }> addrspace(4)* getelementptr ({i1, <{ i16, i128 }>} addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fg() nounwind { + %t = bitcast i64 ptrtoint ({double, double} addrspace(4)* getelementptr ({i1, {double, double}} addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fh() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fi() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({i1, double}addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} + +; SCEV: Classifying expressions for: @fM +; SCEV: %t = bitcast i64* getelementptr (i64* null, i32 1) to i64* +; SCEV: --> sizeof(i64) +; SCEV: Classifying expressions for: @fN +; SCEV: %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64* +; SCEV: --> sizeof(i64) +; SCEV: Classifying expressions for: @fO +; SCEV: %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64* +; SCEV: --> sizeof(i64) + +define i64* @fM() nounwind { + %t = bitcast i64* getelementptr (i64* null, i32 1) to i64* + ret i64* %t +} +define i64* @fN() nounwind { + %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64* + ret i64* %t +} +define i64* @fO() nounwind { + %t = bitcast i64* getelementptr 
([2 x i64]* null, i32 0, i32 1) to i64* + ret i64* %t +} +; SCEV: Classifying expressions for: @fZ +; SCEV: %t = bitcast i32 addrspace(1)* getelementptr inbounds (i32 addrspace(1)* getelementptr inbounds ([3 x { i32, i32 }] addrspace(1)* @ext2, i64 0, i64 1, i32 0), i64 1) to i32 addrspace(1)* +; SCEV: --> ((3 * sizeof(i32)) + @ext2) + +@ext2 = external addrspace(1) global [3 x { i32, i32 }] +define i32 addrspace(1)* @fZ() nounwind { + %t = bitcast i32 addrspace(1)* getelementptr inbounds (i32 addrspace(1)* getelementptr inbounds ([3 x { i32, i32 }] addrspace(1)* @ext2, i64 0, i64 1, i32 0), i64 1) to i32 addrspace(1)* + ret i32 addrspace(1)* %t +} Index: test/Other/AddressSpace/to-constant-fold-gep-0.ll =================================================================== --- test/Other/AddressSpace/to-constant-fold-gep-0.ll (revision 0) +++ test/Other/AddressSpace/to-constant-fold-gep-0.ll (working copy) @@ -0,0 +1,198 @@ +; "TO" - Optimizations and targetdata. This tests target-dependent +; folding in the optimizers. +; RUN: opt -S -o - -instcombine -globalopt < %s | FileCheck --check-prefix=TO %s +target datalayout = "e-p:128:128:128-p1:32:32:32-p2:8:8:8-p3:16:16:16-p4:64:64:64-p5:96:96:96-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32" +; TO: ModuleID = '' +; TO: target datalayout = "e-p:128:128:128-p1:32:32:32-p2:8:8:8-p3:16:16:16-p4:64:64:64-p5:96:96:96-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32" + + +; TO: @G8 = global i8 addrspace(1)* null +@G8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) +; TO: @G1 = global i1 addrspace(2)* null +@G1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -1) +; TO: @F8 = global i8 addrspace(1)* inttoptr (i64 -1 to i8 addrspace(1)*) +@F8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) +; TO: @F1 = global i1 addrspace(2)* inttoptr (i64 -1 to i1 addrspace(2)*) +@F1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -2) +; TO: @H8 = global i8 addrspace(1)* inttoptr (i64 -1 to i8 addrspace(1)*) +@H8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 0 to i8 addrspace(1)*), i32 -1) +; TO: @H1 = global i1 addrspace(2)* inttoptr (i32 -1 to i1 addrspace(2)*) +@H1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 0 to i1 addrspace(2)*), i8 -1) +; TO: @a = constant i64 18480 +@a = constant i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]} addrspace(4)* getelementptr ({[7 x double], [7 x double]} addrspace(4)* null, i64 11) to i64), i64 5)) + +; TO: @b = constant i64 8 +@b = constant i64 ptrtoint ([13 x double] addrspace(4)* getelementptr ({i1, [13 x double]} addrspace(4)* null, i64 0, i32 1) to i64) + +; TO: @c = constant i64 16 +@c = constant i64 ptrtoint (double addrspace(4)* getelementptr ({double, double, double, double} addrspace(4)* null, i64 0, i32 2) to i64) + +; TO: @d = constant i64 88 +@d = constant i64 ptrtoint (double addrspace(4)* getelementptr ([13 x double] addrspace(4)* null, i64 0, i32 11) to i64) + +; TO: @e = constant i64 16 +@e = constant i64 ptrtoint (double addrspace(4)* getelementptr ({double, float, double, double} addrspace(4)* null, i64 0, i32 2) to i64) + +; TO: @f = constant i64 1 +@f = constant i64 ptrtoint (<{ i16, i128 }> addrspace(4)* getelementptr ({i1, <{ i16, i128 }>} addrspace(4)* null, i64 0, i32 1) to i64) + +; TO: @g = constant i64 8 +@g = constant 
i64 ptrtoint ({double, double} addrspace(4)* getelementptr ({i1, {double, double}} addrspace(4)* null, i64 0, i32 1) to i64) +; TO: @h = constant i64 8 +@h = constant i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i64 1) to i64) + +; TO: @i = constant i64 8 +@i = constant i64 ptrtoint (double addrspace(4)* getelementptr ({i1, double} addrspace(4)* null, i64 0, i32 1) to i64) + +; TO: @M = constant i64 addrspace(5)* inttoptr (i64 8 to i64 addrspace(5)*) +; TO: @N = constant i64 addrspace(5)* inttoptr (i64 8 to i64 addrspace(5)*) +; TO: @O = constant i64 addrspace(5)* inttoptr (i64 8 to i64 addrspace(5)*) + +@M = constant i64 addrspace(5)* getelementptr (i64 addrspace(5)* null, i32 1) +@N = constant i64 addrspace(5)* getelementptr ({ i64, i64 } addrspace(5)* null, i32 0, i32 1) +@O = constant i64 addrspace(5)* getelementptr ([2 x i64] addrspace(5)* null, i32 0, i32 1) + +; TO: @Y = global [3 x { i32, i32 }]addrspace(3)* getelementptr ([3 x { i32, i32 }]addrspace(3)* @ext, i64 2) +@ext = external addrspace(3) global [3 x { i32, i32 }] +@Y = global [3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 1), i64 1) + +; TO: @Z = global i32addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 0, i64 1, i32 1) +@Z = global i32addrspace(3)* getelementptr inbounds (i32addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 0, i64 1, i32 0), i64 1) + +; TO: define i8 addrspace(1)* @goo8() nounwind { +; TO: ret i8 addrspace(1)* null +; TO: } +; TO: define i1 addrspace(2)* @goo1() nounwind { +; TO: ret i1 addrspace(2)* null +; TO: } +; TO: define i8 addrspace(1)* @foo8() nounwind { +; TO: ret i8 addrspace(1)* inttoptr (i64 -1 to i8 addrspace(1)*) +; TO: } +; TO: define i1 addrspace(2)* @foo1() nounwind { +; TO: ret i1 addrspace(2)* inttoptr (i64 -1 to i1 addrspace(2)*) +; TO: } +; TO: define i8 addrspace(1)* @hoo8() nounwind { +; TO: ret i8 addrspace(1)* inttoptr (i64 -1 to i8 addrspace(1)*) +; TO: } +; TO: define i1 addrspace(2)* @hoo1() nounwind { +; TO: ret i1 addrspace(2)* inttoptr (i64 -1 to i1 addrspace(2)*) +; TO: } +define i8 addrspace(1)* @goo8() nounwind { + %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)* + ret i8 addrspace(1)* %t +} +define i1 addrspace(2)* @goo1() nounwind { + %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)* + ret i1 addrspace(2)* %t +} +define i8 addrspace(1)* @foo8() nounwind { + %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) to i8 addrspace(1)* + ret i8 addrspace(1)* %t +} +define i1 addrspace(2)* @foo1() nounwind { + %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -2) to i1 addrspace(2)* + ret i1 addrspace(2)* %t +} +define i8 addrspace(1)* @hoo8() nounwind { + %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 0 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)* + ret i8 addrspace(1)* %t +} +define i1 addrspace(2)* @hoo1() nounwind { + %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 0 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)* + ret i1 addrspace(2)* %t +} + +; TO: define i64 @fa() nounwind { +; TO: ret i64 18480 +; TO: } +; TO: define i64 @fb() nounwind { +; TO: 
ret i64 8 +; TO: } +; TO: define i64 @fc() nounwind { +; TO: ret i64 16 +; TO: } +; TO: define i64 @fd() nounwind { +; TO: ret i64 88 +; TO: } +; TO: define i64 @fe() nounwind { +; TO: ret i64 16 +; TO: } +; TO: define i64 @ff() nounwind { +; TO: ret i64 1 +; TO: } +; TO: define i64 @fg() nounwind { +; TO: ret i64 8 +; TO: } +; TO: define i64 @fh() nounwind { +; TO: ret i64 8 +; TO: } +; TO: define i64 @fi() nounwind { +; TO: ret i64 8 +; TO: } +define i64 @fa() nounwind { + %t = bitcast i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]}* getelementptr ({[7 x double], [7 x double]}* null, i64 11) to i64), i64 5)) to i64 + ret i64 %t +} +define i64 @fb() nounwind { + %t = bitcast i64 ptrtoint ([13 x double] addrspace(4)* getelementptr ({i1, [13 x double]} addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fc() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({double, double, double, double} addrspace(4)* null, i64 0, i32 2) to i64) to i64 + ret i64 %t +} +define i64 @fd() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ([13 x double] addrspace(4)* null, i64 0, i32 11) to i64) to i64 + ret i64 %t +} +define i64 @fe() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({double, float, double, double} addrspace(4)* null, i64 0, i32 2) to i64) to i64 + ret i64 %t +} +define i64 @ff() nounwind { + %t = bitcast i64 ptrtoint (<{ i16, i128 }> addrspace(4)* getelementptr ({i1, <{ i16, i128 }>} addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fg() nounwind { + %t = bitcast i64 ptrtoint ({double, double} addrspace(4)* getelementptr ({i1, {double, double}} addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fh() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fi() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({i1, double}addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} + +; TO: define i64* @fM() nounwind { +; TO: ret i64* inttoptr (i64 8 to i64*) +; TO: } +; TO: define i64* @fN() nounwind { +; TO: ret i64* inttoptr (i64 8 to i64*) +; TO: } +; TO: define i64* @fO() nounwind { +; TO: ret i64* inttoptr (i64 8 to i64*) +; TO: } + +define i64* @fM() nounwind { + %t = bitcast i64* getelementptr (i64* null, i32 1) to i64* + ret i64* %t +} +define i64* @fN() nounwind { + %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64* + ret i64* %t +} +define i64* @fO() nounwind { + %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64* + ret i64* %t +} +; TO: define i32 addrspace(1)* @fZ() nounwind { +; TO: ret i32 addrspace(1)* getelementptr inbounds ([3 x { i32, i32 }] addrspace(1)* @ext2, i64 0, i64 1, i32 1) +; TO: } +@ext2 = external addrspace(1) global [3 x { i32, i32 }] +define i32 addrspace(1)* @fZ() nounwind { + %t = bitcast i32 addrspace(1)* getelementptr inbounds (i32 addrspace(1)* getelementptr inbounds ([3 x { i32, i32 }] addrspace(1)* @ext2, i64 0, i64 1, i32 0), i64 1) to i32 addrspace(1)* + ret i32 addrspace(1)* %t +} Index: test/Other/AddressSpace/opt-constant-fold-gep-0.ll =================================================================== --- test/Other/AddressSpace/opt-constant-fold-gep-0.ll (revision 0) +++ test/Other/AddressSpace/opt-constant-fold-gep-0.ll (working copy) @@ -0,0 +1,214 @@ +; "OPT" - Optimizations but no targetdata. 
This tests target-independent +; folding in the optimizers. +; RUN: opt -S -o - -instcombine -globalopt < %s | FileCheck --check-prefix=OPT %s +target datalayout = "e-p:128:128:128-p1:32:32:32-p2:8:8:8-p3:16:16:16-p4:64:64:64-p5:96:96:96-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32" + +; OPT: ModuleID = '' +; OPT: target datalayout = "e-p:128:128:128-p1:32:32:32-p2:8:8:8-p3:16:16:16-p4:64:64:64-p5:96:96:96-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32" + +; OPT: @G8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) +@G8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) +; OPT: @G1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -1) +@G1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -1) +; OPT: @F8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) +@F8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) +; OPT: @F1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -2) +@F1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -2) +; OPT: @H8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* null, i32 -1) +@H8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 0 to i8 addrspace(1)*), i32 -1) +; OPT: @H1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i8 -1) +@H1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 0 to i1 addrspace(2)*), i8 -1) + + +; The target-independent folder should be able to do some clever +; simplifications on sizeof, alignof, and offsetof expressions. The +; target-dependent folder should fold these down to constants. 
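+;
+; For example, @a below is 3 * 5 * (11 * sizeof({[7 x double], [7 x double]})),
+; and that struct holds 14 doubles, so the target-independent folder reduces it
+; to 2310 * sizeof(double); with targetdata, sizeof(double) = 8, and
+; to-constant-fold-gep-0.ll checks the fully folded 3 * 5 * 11 * 14 * 8 = 18480.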
+; OPT: @a = constant i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2310) +@a = constant i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]} addrspace(4)* getelementptr ({[7 x double], [7 x double]} addrspace(4)* null, i64 11) to i64), i64 5)) + +; OPT: @b = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) +@b = constant i64 ptrtoint ([13 x double] addrspace(4)* getelementptr ({i1, [13 x double]} addrspace(4)* null, i64 0, i32 1) to i64) + +; OPT: @c = constant i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2) +@c = constant i64 ptrtoint (double addrspace(4)* getelementptr ({double, double, double, double} addrspace(4)* null, i64 0, i32 2) to i64) + +; OPT: @d = constant i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 11) +@d = constant i64 ptrtoint (double addrspace(4)* getelementptr ([13 x double] addrspace(4)* null, i64 0, i32 11) to i64) + +; OPT: @e = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) +@e = constant i64 ptrtoint (double addrspace(4)* getelementptr ({double, float, double, double} addrspace(4)* null, i64 0, i32 2) to i64) + +; OPT: @f = constant i64 1 +@f = constant i64 ptrtoint (<{ i16, i128 }> addrspace(4)* getelementptr ({i1, <{ i16, i128 }>} addrspace(4)* null, i64 0, i32 1) to i64) + +; OPT: @g = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) +@g = constant i64 ptrtoint ({double, double} addrspace(4)* getelementptr ({i1, {double, double}} addrspace(4)* null, i64 0, i32 1) to i64) + +; OPT: @h = constant i64 ptrtoint (i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 1) to i64) +@h = constant i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i64 1) to i64) + +; OPT: @i = constant i64 ptrtoint (i1 addrspace(2)* getelementptr ({ i1, i1 addrspace(2)* }* null, i64 0, i32 1) to i64) +@i = constant i64 ptrtoint (double addrspace(4)* getelementptr ({i1, double} addrspace(4)* null, i64 0, i32 1) to i64) + +; The target-dependent folder should cast GEP indices to integer-sized pointers. + +; OPT: @M = constant i64 addrspace(5)* getelementptr (i64 addrspace(5)* null, i32 1) +; OPT: @N = constant i64 addrspace(5)* getelementptr ({ i64, i64 } addrspace(5)* null, i32 0, i32 1) +; OPT: @O = constant i64 addrspace(5)* getelementptr ([2 x i64] addrspace(5)* null, i32 0, i32 1) + +@M = constant i64 addrspace(5)* getelementptr (i64 addrspace(5)* null, i32 1) +@N = constant i64 addrspace(5)* getelementptr ({ i64, i64 } addrspace(5)* null, i32 0, i32 1) +@O = constant i64 addrspace(5)* getelementptr ([2 x i64] addrspace(5)* null, i32 0, i32 1) + +; Fold GEP of a GEP. Very simple cases are folded. 
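+;
+; "Very simple" means the outer index combines directly with the inner GEP's
+; last index: @Y steps one whole [3 x { i32, i32 }] past @ext[1], so the two
+; GEPs become a single GEP with index 1 + 1 = 2. @Z stays in nested form here;
+; only the target-dependent folder (see to-constant-fold-gep-0.ll) collapses
+; it into a single inbounds GEP.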
+ +; OPT: @Y = global [3 x { i32, i32 }]addrspace(3)* getelementptr ([3 x { i32, i32 }]addrspace(3)* @ext, i64 2) +@ext = external addrspace(3) global [3 x { i32, i32 }] +@Y = global [3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 1), i64 1) + +; OPT: @Z = global i32addrspace(3)* getelementptr (i32addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 0, i64 1, i32 0), i64 1) +@Z = global i32addrspace(3)* getelementptr inbounds (i32addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 0, i64 1, i32 0), i64 1) + + +; Duplicate all of the above as function return values rather than +; global initializers. + +; OPT: define i8 addrspace(1)* @goo8() nounwind { +; OPT: ret i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) +; OPT: } +; OPT: define i1 addrspace(2)* @goo1() nounwind { +; OPT: ret i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -1) +; OPT: } +; OPT: define i8 addrspace(1)* @foo8() nounwind { +; OPT: ret i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) +; OPT: } +; OPT: define i1 addrspace(2)* @foo1() nounwind { +; OPT: ret i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -2) +; OPT: } +; OPT: define i8 addrspace(1)* @hoo8() nounwind { +; OPT: ret i8 addrspace(1)* getelementptr (i8 addrspace(1)* null, i32 -1) +; OPT: } +; OPT: define i1 addrspace(2)* @hoo1() nounwind { +; OPT: ret i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 -1) +; OPT: } +define i8 addrspace(1)* @goo8() nounwind { + %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)* + ret i8 addrspace(1)* %t +} +define i1 addrspace(2)* @goo1() nounwind { + %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)* + ret i1 addrspace(2)* %t +} +define i8 addrspace(1)* @foo8() nounwind { + %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) to i8 addrspace(1)* + ret i8 addrspace(1)* %t +} +define i1 addrspace(2)* @foo1() nounwind { + %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -2) to i1 addrspace(2)* + ret i1 addrspace(2)* %t +} +define i8 addrspace(1)* @hoo8() nounwind { + %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 0 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)* + ret i8 addrspace(1)* %t +} +define i1 addrspace(2)* @hoo1() nounwind { + %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 0 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)* + ret i1 addrspace(2)* %t +} + +; OPT: define i64 @fa() nounwind { +; OPT: ret i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2310) +; OPT: } +; OPT: define i64 @fb() nounwind { +; OPT: ret i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) +; OPT: } +; OPT: define i64 @fc() nounwind { +; OPT: ret i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2) +; OPT: } +; OPT: define i64 @fd() nounwind { +; OPT: ret i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* 
null, i32 1) to i64), i64 11) +; OPT: } +; OPT: define i64 @fe() nounwind { +; OPT: ret i64 ptrtoint (double addrspace(4)* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) +; OPT: } +; OPT: define i64 @ff() nounwind { +; OPT: ret i64 1 +; OPT: } +; OPT: define i64 @fg() nounwind { +; OPT: ret i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) +; OPT: } +; OPT: define i64 @fh() nounwind { +; OPT: ret i64 ptrtoint (i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 1) to i64) +; OPT: } +; OPT: define i64 @fi() nounwind { +; OPT: ret i64 ptrtoint (i1 addrspace(2)* getelementptr ({ i1, i1 addrspace(2)* }* null, i64 0, i32 1) to i64) +; OPT: } + +define i64 @fa() nounwind { + %t = bitcast i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]}* getelementptr ({[7 x double], [7 x double]}* null, i64 11) to i64), i64 5)) to i64 + ret i64 %t +} +define i64 @fb() nounwind { + %t = bitcast i64 ptrtoint ([13 x double] addrspace(4)* getelementptr ({i1, [13 x double]} addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fc() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({double, double, double, double} addrspace(4)* null, i64 0, i32 2) to i64) to i64 + ret i64 %t +} +define i64 @fd() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ([13 x double] addrspace(4)* null, i64 0, i32 11) to i64) to i64 + ret i64 %t +} +define i64 @fe() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({double, float, double, double} addrspace(4)* null, i64 0, i32 2) to i64) to i64 + ret i64 %t +} +define i64 @ff() nounwind { + %t = bitcast i64 ptrtoint (<{ i16, i128 }> addrspace(4)* getelementptr ({i1, <{ i16, i128 }>} addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fg() nounwind { + %t = bitcast i64 ptrtoint ({double, double} addrspace(4)* getelementptr ({i1, {double, double}} addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fh() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fi() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({i1, double}addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} + +; OPT: define i64* @fM() nounwind { +; OPT: ret i64* getelementptr (i64* null, i32 1) +; OPT: } +; OPT: define i64* @fN() nounwind { +; OPT: ret i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) +; OPT: } +; OPT: define i64* @fO() nounwind { +; OPT: ret i64* getelementptr ([2 x i64]* null, i32 0, i32 1) +; OPT: } + +define i64* @fM() nounwind { + %t = bitcast i64* getelementptr (i64* null, i32 1) to i64* + ret i64* %t +} +define i64* @fN() nounwind { + %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64* + ret i64* %t +} +define i64* @fO() nounwind { + %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64* + ret i64* %t +} + +; OPT: define i32 addrspace(1)* @fZ() nounwind { +; OPT: ret i32 addrspace(1)* getelementptr (i32 addrspace(1)* getelementptr inbounds ([3 x { i32, i32 }] addrspace(1)* @ext2, i64 0, i64 1, i32 0), i64 1) +; OPT: } +@ext2 = external addrspace(1) global [3 x { i32, i32 }] +define i32 addrspace(1)* @fZ() nounwind { + %t = bitcast i32 addrspace(1)* getelementptr inbounds (i32 addrspace(1)* getelementptr inbounds ([3 x { i32, i32 }] addrspace(1)* @ext2, i64 0, i64 1, i32 0), 
i64 1) to i32 addrspace(1)* + ret i32 addrspace(1)* %t +} Index: test/Other/AddressSpace/constant-fold-gep-as-0.ll =================================================================== --- test/Other/AddressSpace/constant-fold-gep-as-0.ll (revision 0) +++ test/Other/AddressSpace/constant-fold-gep-as-0.ll (working copy) @@ -0,0 +1,235 @@ +; "PLAIN" - No optimizations. This tests the target-independent +; constant folder. +; RUN: opt -S -o - < %s | FileCheck --check-prefix=PLAIN %s + +target datalayout = "e-p:128:128:128-p1:32:32:32-p2:8:8:8-p3:16:16:16-p4:64:64:64-p5:96:96:96-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32" + +; PLAIN: ModuleID = '' + +; The automatic constant folder in opt does not have targetdata access, so +; it can't fold gep arithmetic, in general. However, the constant folder run +; from instcombine and global opt can use targetdata. +; PLAIN: @G8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) +@G8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) +; PLAIN: @G1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -1) +@G1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -1) +; PLAIN: @F8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) +@F8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) +; PLAIN: @F1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -2) +@F1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -2) +; PLAIN: @H8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* null, i32 -1) +@H8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 0 to i8 addrspace(1)*), i32 -1) +; PLAIN: @H1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i8 -1) +@H1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 0 to i1 addrspace(2)*), i8 -1) + + +; The target-independent folder should be able to do some clever +; simplifications on sizeof, alignof, and offsetof expressions. The +; target-dependent folder should fold these down to constants. 
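+;
+; Of these, only @f can be folded to a plain integer without targetdata: the
+; inner <{ i16, i128 }> struct is packed, so its alignment is 1 regardless of
+; target, and the offset of the second field of {i1, <{ i16, i128 }>} is just
+; the single byte occupied by the leading i1, giving the constant 1 (see @f
+; and @ff in opt-constant-fold-gep-0.ll).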
+; PLAIN-X: @a = constant i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2310) +@a = constant i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]} addrspace(4)* getelementptr ({[7 x double], [7 x double]} addrspace(4)* null, i64 11) to i64), i64 5)) + +; PLAIN-X: @b = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) +@b = constant i64 ptrtoint ([13 x double] addrspace(4)* getelementptr ({i1, [13 x double]} addrspace(4)* null, i64 0, i32 1) to i64) + +; PLAIN-X: @c = constant i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2) +@c = constant i64 ptrtoint (double addrspace(4)* getelementptr ({double, double, double, double} addrspace(4)* null, i64 0, i32 2) to i64) + +; PLAIN-X: @d = constant i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 11) +@d = constant i64 ptrtoint (double addrspace(4)* getelementptr ([13 x double] addrspace(4)* null, i64 0, i32 11) to i64) + +; PLAIN-X: @e = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) +@e = constant i64 ptrtoint (double addrspace(4)* getelementptr ({double, float, double, double} addrspace(4)* null, i64 0, i32 2) to i64) + +; PLAIN-X: @f = constant i64 1 +@f = constant i64 ptrtoint (<{ i16, i128 }> addrspace(4)* getelementptr ({i1, <{ i16, i128 }>} addrspace(4)* null, i64 0, i32 1) to i64) + +; PLAIN-X: @g = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) +@g = constant i64 ptrtoint ({double, double} addrspace(4)* getelementptr ({i1, {double, double}} addrspace(4)* null, i64 0, i32 1) to i64) + +; PLAIN-X: @h = constant i64 ptrtoint (i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 1) to i64) +@h = constant i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i64 1) to i64) + +; PLAIN-X: @i = constant i64 ptrtoint (i1 addrspace(2)* getelementptr ({ i1, i1 addrspace(2)* }* null, i64 0, i32 1) to i64) +@i = constant i64 ptrtoint (double addrspace(4)* getelementptr ({i1, double} addrspace(4)* null, i64 0, i32 1) to i64) + +; The target-dependent folder should cast GEP indices to integer-sized pointers. + +; PLAIN: @M = constant i64 addrspace(5)* getelementptr (i64 addrspace(5)* null, i32 1) +; PLAIN: @N = constant i64 addrspace(5)* getelementptr ({ i64, i64 } addrspace(5)* null, i32 0, i32 1) +; PLAIN: @O = constant i64 addrspace(5)* getelementptr ([2 x i64] addrspace(5)* null, i32 0, i32 1) + +@M = constant i64 addrspace(5)* getelementptr (i64 addrspace(5)* null, i32 1) +@N = constant i64 addrspace(5)* getelementptr ({ i64, i64 } addrspace(5)* null, i32 0, i32 1) +@O = constant i64 addrspace(5)* getelementptr ([2 x i64] addrspace(5)* null, i32 0, i32 1) + +; Fold GEP of a GEP. Very simple cases are folded. 
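+;
+; The PLAIN-X lines below record the expected forms: a single inbounds GEP
+; with index 2 for @Y, and the unchanged nested GEP for @Z.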
+ +; PLAIN-X: @Y = global [3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 2) +@ext = external addrspace(3) global [3 x { i32, i32 }] +@Y = global [3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 1), i64 1) + +; PLAIN-X: @Z = global i32addrspace(3)* getelementptr inbounds (i32addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 0, i64 1, i32 0), i64 1) +@Z = global i32addrspace(3)* getelementptr inbounds (i32addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 0, i64 1, i32 0), i64 1) + + +; Duplicate all of the above as function return values rather than +; global initializers. + +; PLAIN: define i8 addrspace(1)* @goo8() nounwind { +; PLAIN: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)* +; PLAIN: ret i8 addrspace(1)* %t +; PLAIN: } +; PLAIN: define i1 addrspace(2)* @goo1() nounwind { +; PLAIN: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)* +; PLAIN: ret i1 addrspace(2)* %t +; PLAIN: } +; PLAIN: define i8 addrspace(1)* @foo8() nounwind { +; PLAIN: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) to i8 addrspace(1)* +; PLAIN: ret i8 addrspace(1)* %t +; PLAIN: } +; PLAIN: define i1 addrspace(2)* @foo1() nounwind { +; PLAIN: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -2) to i1 addrspace(2)* +; PLAIN: ret i1 addrspace(2)* %t +; PLAIN: } +; PLAIN: define i8 addrspace(1)* @hoo8() nounwind { +; PLAIN: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* null, i32 -1) to i8 addrspace(1)* +; PLAIN: ret i8 addrspace(1)* %t +; PLAIN: } +; PLAIN: define i1 addrspace(2)* @hoo1() nounwind { +; PLAIN: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 -1) to i1 addrspace(2)* +; PLAIN: ret i1 addrspace(2)* %t +; PLAIN: } +define i8 addrspace(1)* @goo8() nounwind { + %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)* + ret i8 addrspace(1)* %t +} +define i1 addrspace(2)* @goo1() nounwind { + %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)* + ret i1 addrspace(2)* %t +} +define i8 addrspace(1)* @foo8() nounwind { + %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) to i8 addrspace(1)* + ret i8 addrspace(1)* %t +} +define i1 addrspace(2)* @foo1() nounwind { + %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -2) to i1 addrspace(2)* + ret i1 addrspace(2)* %t +} +define i8 addrspace(1)* @hoo8() nounwind { + %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 0 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)* + ret i8 addrspace(1)* %t +} +define i1 addrspace(2)* @hoo1() nounwind { + %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 0 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)* + ret i1 addrspace(2)* %t +} + +; PLAIN-X: define i64 @fa() nounwind { +; PLAIN-X: %t = bitcast i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2310) to 
i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +; PLAIN-X: define i64 @fb() nounwind { +; PLAIN-X: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +; PLAIN-X: define i64 @fc() nounwind { +; PLAIN-X: %t = bitcast i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2) to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +; PLAIN-X: define i64 @fd() nounwind { +; PLAIN-X: %t = bitcast i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 11) to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +; PLAIN-X: define i64 @fe() nounwind { +; PLAIN-X: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +; PLAIN-X: define i64 @ff() nounwind { +; PLAIN-X: %t = bitcast i64 1 to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +; PLAIN-X: define i64 @fg() nounwind { +; PLAIN-X: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +; PLAIN-X: define i64 @fh() nounwind { +; PLAIN-X: %t = bitcast i64 ptrtoint (i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 1) to i64) to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +; PLAIN-X: define i64 @fi() nounwind { +; PLAIN-X: %t = bitcast i64 ptrtoint (i1 addrspace(2)* getelementptr ({ i1, i1 addrspace(2)* }* null, i64 0, i32 1) to i64) to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +define i64 @fa() nounwind { + %t = bitcast i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]}* getelementptr ({[7 x double], [7 x double]}* null, i64 11) to i64), i64 5)) to i64 + ret i64 %t +} +define i64 @fb() nounwind { + %t = bitcast i64 ptrtoint ([13 x double] addrspace(4)* getelementptr ({i1, [13 x double]} addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fc() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({double, double, double, double} addrspace(4)* null, i64 0, i32 2) to i64) to i64 + ret i64 %t +} +define i64 @fd() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ([13 x double] addrspace(4)* null, i64 0, i32 11) to i64) to i64 + ret i64 %t +} +define i64 @fe() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({double, float, double, double} addrspace(4)* null, i64 0, i32 2) to i64) to i64 + ret i64 %t +} +define i64 @ff() nounwind { + %t = bitcast i64 ptrtoint (<{ i16, i128 }> addrspace(4)* getelementptr ({i1, <{ i16, i128 }>} addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fg() nounwind { + %t = bitcast i64 ptrtoint ({double, double} addrspace(4)* getelementptr ({i1, {double, double}} addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fh() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fi() nounwind { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({i1, double}addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} + +; PLAIN: define i64* @fM() nounwind { +; PLAIN: %t = bitcast i64* getelementptr (i64* null, i32 1) to i64* +; PLAIN: ret i64* %t +; PLAIN: } +; PLAIN: define i64* @fN() nounwind { +; PLAIN: %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 
1) to i64* +; PLAIN: ret i64* %t +; PLAIN: } +; PLAIN: define i64* @fO() nounwind { +; PLAIN: %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64* +; PLAIN: ret i64* %t +; PLAIN: } + +define i64* @fM() nounwind { + %t = bitcast i64* getelementptr (i64* null, i32 1) to i64* + ret i64* %t +} +define i64* @fN() nounwind { + %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64* + ret i64* %t +} +define i64* @fO() nounwind { + %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64* + ret i64* %t +} + +; PLAIN: define i32 addrspace(1)* @fZ() nounwind { +; PLAIN: %t = bitcast i32 addrspace(1)* getelementptr inbounds (i32 addrspace(1)* getelementptr inbounds ([3 x { i32, i32 }] addrspace(1)* @ext2, i64 0, i64 1, i32 0), i64 1) to i32 addrspace(1)* +; PLAIN: ret i32 addrspace(1)* %t +; PLAIN: } +@ext2 = external addrspace(1) global [3 x { i32, i32 }] +define i32 addrspace(1)* @fZ() nounwind { + %t = bitcast i32 addrspace(1)* getelementptr inbounds (i32 addrspace(1)* getelementptr inbounds ([3 x { i32, i32 }] addrspace(1)* @ext2, i64 0, i64 1, i32 0), i64 1) to i32 addrspace(1)* + ret i32 addrspace(1)* %t +} Index: test/Other/multi-pointer-size.ll =================================================================== --- test/Other/multi-pointer-size.ll (revision 0) +++ test/Other/multi-pointer-size.ll (working copy) @@ -0,0 +1,43 @@ +; RUN: opt -instcombine %s | llvm-dis | FileCheck %s +target datalayout = "e-p:32:32:32-p1:64:64:64-p2:8:8:8-p3:16:16:16-p4:96:96:96-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32" + +define i32 @test_as0(i32 addrspace(0)* %A) { +entry: +; CHECK: %arrayidx = getelementptr i32* %A, i32 1 + %arrayidx = getelementptr i32 addrspace(0)* %A, i64 1 + %y = load i32 addrspace(0)* %arrayidx, align 4 + ret i32 %y +} + +define i32 @test_as1(i32 addrspace(1)* %A) { +entry: +; CHECK: %arrayidx = getelementptr i32 addrspace(1)* %A, i64 1 + %arrayidx = getelementptr i32 addrspace(1)* %A, i32 1 + %y = load i32 addrspace(1)* %arrayidx, align 4 + ret i32 %y +} + +define i32 @test_as2(i32 addrspace(2)* %A) { +entry: +; CHECK: %arrayidx = getelementptr i32 addrspace(2)* %A, i8 1 + %arrayidx = getelementptr i32 addrspace(2)* %A, i32 1 + %y = load i32 addrspace(2)* %arrayidx, align 4 + ret i32 %y +} + +define i32 @test_as3(i32 addrspace(3)* %A) { +entry: +; CHECK: %arrayidx = getelementptr i32 addrspace(3)* %A, i16 1 + %arrayidx = getelementptr i32 addrspace(3)* %A, i32 1 + %y = load i32 addrspace(3)* %arrayidx, align 4 + ret i32 %y +} + +define i32 @test_as4(i32 addrspace(4)* %A) { +entry: +; CHECK: %arrayidx = getelementptr i32 addrspace(4)* %A, i96 1 + %arrayidx = getelementptr i32 addrspace(4)* %A, i32 1 + %y = load i32 addrspace(4)* %arrayidx, align 4 + ret i32 %y +} +
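+
+; In each CHECK line above, the canonical index type follows the pointer width
+; that the datalayout assigns to the address space: i32 for the default
+; address space, i64 for addrspace(1), i8 for addrspace(2), i16 for
+; addrspace(3), and i96 for addrspace(4).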