[llvm] f4044dd - [SLP] Precommit short load / wide math test for AArch64.

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 24 08:58:00 PDT 2020


Author: Florian Hahn
Date: 2020-06-24T16:57:45+01:00
New Revision: f4044dd5392d97a2a0f1477f42e6039dd3764016

URL: https://github.com/llvm/llvm-project/commit/f4044dd5392d97a2a0f1477f42e6039dd3764016
DIFF: https://github.com/llvm/llvm-project/commit/f4044dd5392d97a2a0f1477f42e6039dd3764016.diff

LOG: [SLP] Precommit short load / wide math test for AArch64.

This pattern is key to eliminating a 10% performance regression in
SPEC2006.

Added: 
    

Modified: 
    llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll
index 8278d060756f..1c0b99dc996b 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll
@@ -235,3 +235,191 @@ for.body:
   %exitcond = icmp eq i32 %indvars.iv.next , %n
   br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
 }
+
+ at global = internal global { i32* } zeroinitializer, align 8
+
+; Make sure we vectorize to maximize the load width when loading i16 and
+; extending it for compute operations.
+define void @test_i16_extend(i16* %p.1, i16* %p.2, i32 %idx.i32) {
+; CHECK-LABEL: @test_i16_extend(
+; CHECK-NEXT:    [[P_0:%.*]] = load i32*, i32** getelementptr inbounds ({ i32* }, { i32* }* @global, i64 0, i32 0), align 8
+; CHECK-NEXT:    [[IDX_0:%.*]] = zext i32 [[IDX_I32:%.*]] to i64
+; CHECK-NEXT:    [[IDX_2:%.*]] = add nuw nsw i64 [[IDX_0]], 2
+; CHECK-NEXT:    [[IDX_4:%.*]] = add nuw nsw i64 [[IDX_0]], 4
+; CHECK-NEXT:    [[IDX_6:%.*]] = add nuw nsw i64 [[IDX_0]], 6
+; CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds i16, i16* [[P_1:%.*]], i64 [[IDX_0]]
+; CHECK-NEXT:    [[TMP56:%.*]] = getelementptr inbounds i16, i16* [[P_2:%.*]], i64 [[IDX_0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i16* [[TMP53]] to <2 x i16>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i16>, <2 x i16>* [[TMP1]], align 2
+; CHECK-NEXT:    [[TMP3:%.*]] = zext <2 x i16> [[TMP2]] to <2 x i32>
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i16* [[TMP56]] to <2 x i16>*
+; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i16>, <2 x i16>* [[TMP4]], align 2
+; CHECK-NEXT:    [[TMP6:%.*]] = zext <2 x i16> [[TMP5]] to <2 x i32>
+; CHECK-NEXT:    [[TMP7:%.*]] = sub nsw <2 x i32> [[TMP3]], [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <2 x i32> [[TMP7]], i32 0
+; CHECK-NEXT:    [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
+; CHECK-NEXT:    [[TMP60:%.*]] = getelementptr inbounds i32, i32* [[P_0]], i64 [[TMP9]]
+; CHECK-NEXT:    [[L_1:%.*]] = load i32, i32* [[TMP60]], align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <2 x i32> [[TMP7]], i32 1
+; CHECK-NEXT:    [[TMP11:%.*]] = sext i32 [[TMP10]] to i64
+; CHECK-NEXT:    [[TMP71:%.*]] = getelementptr inbounds i32, i32* [[P_0]], i64 [[TMP11]]
+; CHECK-NEXT:    [[L_2:%.*]] = load i32, i32* [[TMP71]], align 4
+; CHECK-NEXT:    [[TMP75:%.*]] = getelementptr inbounds i16, i16* [[P_1]], i64 [[IDX_2]]
+; CHECK-NEXT:    [[TMP78:%.*]] = getelementptr inbounds i16, i16* [[P_2]], i64 [[IDX_2]]
+; CHECK-NEXT:    [[TMP12:%.*]] = bitcast i16* [[TMP75]] to <2 x i16>*
+; CHECK-NEXT:    [[TMP13:%.*]] = load <2 x i16>, <2 x i16>* [[TMP12]], align 2
+; CHECK-NEXT:    [[TMP14:%.*]] = zext <2 x i16> [[TMP13]] to <2 x i32>
+; CHECK-NEXT:    [[TMP15:%.*]] = bitcast i16* [[TMP78]] to <2 x i16>*
+; CHECK-NEXT:    [[TMP16:%.*]] = load <2 x i16>, <2 x i16>* [[TMP15]], align 2
+; CHECK-NEXT:    [[TMP17:%.*]] = zext <2 x i16> [[TMP16]] to <2 x i32>
+; CHECK-NEXT:    [[TMP18:%.*]] = sub nsw <2 x i32> [[TMP14]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = extractelement <2 x i32> [[TMP18]], i32 0
+; CHECK-NEXT:    [[TMP20:%.*]] = sext i32 [[TMP19]] to i64
+; CHECK-NEXT:    [[TMP82:%.*]] = getelementptr inbounds i32, i32* [[P_0]], i64 [[TMP20]]
+; CHECK-NEXT:    [[L_3:%.*]] = load i32, i32* [[TMP82]], align 4
+; CHECK-NEXT:    [[TMP21:%.*]] = extractelement <2 x i32> [[TMP18]], i32 1
+; CHECK-NEXT:    [[TMP22:%.*]] = sext i32 [[TMP21]] to i64
+; CHECK-NEXT:    [[TMP93:%.*]] = getelementptr inbounds i32, i32* [[P_0]], i64 [[TMP22]]
+; CHECK-NEXT:    [[L_4:%.*]] = load i32, i32* [[TMP93]], align 4
+; CHECK-NEXT:    [[TMP97:%.*]] = getelementptr inbounds i16, i16* [[P_1]], i64 [[IDX_4]]
+; CHECK-NEXT:    [[TMP100:%.*]] = getelementptr inbounds i16, i16* [[P_2]], i64 [[IDX_4]]
+; CHECK-NEXT:    [[TMP23:%.*]] = bitcast i16* [[TMP97]] to <2 x i16>*
+; CHECK-NEXT:    [[TMP24:%.*]] = load <2 x i16>, <2 x i16>* [[TMP23]], align 2
+; CHECK-NEXT:    [[TMP25:%.*]] = zext <2 x i16> [[TMP24]] to <2 x i32>
+; CHECK-NEXT:    [[TMP26:%.*]] = bitcast i16* [[TMP100]] to <2 x i16>*
+; CHECK-NEXT:    [[TMP27:%.*]] = load <2 x i16>, <2 x i16>* [[TMP26]], align 2
+; CHECK-NEXT:    [[TMP28:%.*]] = zext <2 x i16> [[TMP27]] to <2 x i32>
+; CHECK-NEXT:    [[TMP29:%.*]] = sub nsw <2 x i32> [[TMP25]], [[TMP28]]
+; CHECK-NEXT:    [[TMP30:%.*]] = extractelement <2 x i32> [[TMP29]], i32 0
+; CHECK-NEXT:    [[TMP31:%.*]] = sext i32 [[TMP30]] to i64
+; CHECK-NEXT:    [[TMP104:%.*]] = getelementptr inbounds i32, i32* [[P_0]], i64 [[TMP31]]
+; CHECK-NEXT:    [[L_5:%.*]] = load i32, i32* [[TMP104]], align 4
+; CHECK-NEXT:    [[TMP32:%.*]] = extractelement <2 x i32> [[TMP29]], i32 1
+; CHECK-NEXT:    [[TMP33:%.*]] = sext i32 [[TMP32]] to i64
+; CHECK-NEXT:    [[TMP115:%.*]] = getelementptr inbounds i32, i32* [[P_0]], i64 [[TMP33]]
+; CHECK-NEXT:    [[L_6:%.*]] = load i32, i32* [[TMP115]], align 4
+; CHECK-NEXT:    [[TMP119:%.*]] = getelementptr inbounds i16, i16* [[P_1]], i64 [[IDX_6]]
+; CHECK-NEXT:    [[TMP122:%.*]] = getelementptr inbounds i16, i16* [[P_2]], i64 [[IDX_6]]
+; CHECK-NEXT:    [[TMP34:%.*]] = bitcast i16* [[TMP119]] to <2 x i16>*
+; CHECK-NEXT:    [[TMP35:%.*]] = load <2 x i16>, <2 x i16>* [[TMP34]], align 2
+; CHECK-NEXT:    [[TMP36:%.*]] = zext <2 x i16> [[TMP35]] to <2 x i32>
+; CHECK-NEXT:    [[TMP37:%.*]] = bitcast i16* [[TMP122]] to <2 x i16>*
+; CHECK-NEXT:    [[TMP38:%.*]] = load <2 x i16>, <2 x i16>* [[TMP37]], align 2
+; CHECK-NEXT:    [[TMP39:%.*]] = zext <2 x i16> [[TMP38]] to <2 x i32>
+; CHECK-NEXT:    [[TMP40:%.*]] = sub nsw <2 x i32> [[TMP36]], [[TMP39]]
+; CHECK-NEXT:    [[TMP41:%.*]] = extractelement <2 x i32> [[TMP40]], i32 0
+; CHECK-NEXT:    [[TMP42:%.*]] = sext i32 [[TMP41]] to i64
+; CHECK-NEXT:    [[TMP126:%.*]] = getelementptr inbounds i32, i32* [[P_0]], i64 [[TMP42]]
+; CHECK-NEXT:    [[L_7:%.*]] = load i32, i32* [[TMP126]], align 4
+; CHECK-NEXT:    [[TMP43:%.*]] = extractelement <2 x i32> [[TMP40]], i32 1
+; CHECK-NEXT:    [[TMP44:%.*]] = sext i32 [[TMP43]] to i64
+; CHECK-NEXT:    [[TMP137:%.*]] = getelementptr inbounds i32, i32* [[P_0]], i64 [[TMP44]]
+; CHECK-NEXT:    [[L_8:%.*]] = load i32, i32* [[TMP137]], align 4
+; CHECK-NEXT:    call void @use(i32 [[L_1]], i32 [[L_2]], i32 [[L_3]], i32 [[L_4]], i32 [[L_5]], i32 [[L_6]], i32 [[L_7]], i32 [[L_8]])
+; CHECK-NEXT:    ret void
+;
+  %g = getelementptr inbounds { i32*}, { i32 *}* @global, i64 0, i32 0
+  %p.0 = load i32*, i32** %g, align 8
+
+  %idx.0 = zext i32 %idx.i32 to i64
+  %idx.1 = add nsw i64 %idx.0, 1
+  %idx.2 = add nsw i64 %idx.0, 2
+  %idx.3 = add nsw i64 %idx.0, 3
+  %idx.4 = add nsw i64 %idx.0, 4
+  %idx.5 = add nsw i64 %idx.0, 5
+  %idx.6 = add nsw i64 %idx.0, 6
+  %idx.7 = add nsw i64 %idx.0, 7
+
+  %tmp53 = getelementptr inbounds i16, i16* %p.1, i64 %idx.0
+  %op1.l = load i16, i16* %tmp53, align 2
+  %op1.ext = zext i16 %op1.l to i64
+  %tmp56 = getelementptr inbounds i16, i16* %p.2, i64 %idx.0
+  %op2.l = load i16, i16* %tmp56, align 2
+  %op2.ext = zext i16 %op2.l to i64
+  %sub.1 = sub nsw i64 %op1.ext, %op2.ext
+
+  %tmp60 = getelementptr inbounds i32, i32* %p.0, i64 %sub.1
+  %l.1 = load i32, i32* %tmp60, align 4
+
+  %tmp64 = getelementptr inbounds i16, i16* %p.1, i64 %idx.1
+  %tmp65 = load i16, i16* %tmp64, align 2
+  %tmp66 = zext i16 %tmp65 to i64
+  %tmp67 = getelementptr inbounds i16, i16* %p.2, i64 %idx.1
+  %tmp68 = load i16, i16* %tmp67, align 2
+  %tmp69 = zext i16 %tmp68 to i64
+  %sub.2 = sub nsw i64 %tmp66, %tmp69
+
+  %tmp71 = getelementptr inbounds i32, i32* %p.0, i64 %sub.2
+  %l.2 = load i32, i32* %tmp71, align 4
+
+  %tmp75 = getelementptr inbounds i16, i16* %p.1, i64 %idx.2
+  %tmp76 = load i16, i16* %tmp75, align 2
+  %tmp77 = zext i16 %tmp76 to i64
+  %tmp78 = getelementptr inbounds i16, i16* %p.2, i64 %idx.2
+  %tmp79 = load i16, i16* %tmp78, align 2
+  %tmp80 = zext i16 %tmp79 to i64
+  %sub.3 = sub nsw i64 %tmp77, %tmp80
+
+  %tmp82 = getelementptr inbounds i32, i32* %p.0, i64 %sub.3
+  %l.3 = load i32, i32* %tmp82, align 4
+
+  %tmp86 = getelementptr inbounds i16, i16* %p.1, i64 %idx.3
+  %tmp87 = load i16, i16* %tmp86, align 2
+  %tmp88 = zext i16 %tmp87 to i64
+  %tmp89 = getelementptr inbounds i16, i16* %p.2, i64 %idx.3
+  %tmp90 = load i16, i16* %tmp89, align 2
+  %tmp91 = zext i16 %tmp90 to i64
+  %sub.4 = sub nsw i64 %tmp88, %tmp91
+
+  %tmp93 = getelementptr inbounds i32, i32* %p.0, i64 %sub.4
+  %l.4 = load i32, i32* %tmp93, align 4
+
+  %tmp97 = getelementptr inbounds i16, i16* %p.1, i64 %idx.4
+  %tmp98 = load i16, i16* %tmp97, align 2
+  %tmp99 = zext i16 %tmp98 to i64
+  %tmp100 = getelementptr inbounds i16, i16* %p.2, i64 %idx.4
+  %tmp101 = load i16, i16* %tmp100, align 2
+  %tmp102 = zext i16 %tmp101 to i64
+  %sub.5 = sub nsw i64 %tmp99, %tmp102
+
+  %tmp104 = getelementptr inbounds i32, i32* %p.0, i64 %sub.5
+  %l.5 = load i32, i32* %tmp104, align 4
+
+  %tmp108 = getelementptr inbounds i16, i16* %p.1, i64 %idx.5
+  %tmp109 = load i16, i16* %tmp108, align 2
+  %tmp110 = zext i16 %tmp109 to i64
+  %tmp111 = getelementptr inbounds i16, i16* %p.2, i64 %idx.5
+  %tmp112 = load i16, i16* %tmp111, align 2
+  %tmp113 = zext i16 %tmp112 to i64
+  %sub.6 = sub nsw i64 %tmp110, %tmp113
+
+  %tmp115 = getelementptr inbounds i32, i32* %p.0, i64 %sub.6
+  %l.6 = load i32, i32* %tmp115, align 4
+
+  %tmp119 = getelementptr inbounds i16, i16* %p.1, i64 %idx.6
+  %tmp120 = load i16, i16* %tmp119, align 2
+  %tmp121 = zext i16 %tmp120 to i64
+  %tmp122 = getelementptr inbounds i16, i16* %p.2, i64 %idx.6
+  %tmp123 = load i16, i16* %tmp122, align 2
+  %tmp124 = zext i16 %tmp123 to i64
+  %sub.7 = sub nsw i64 %tmp121, %tmp124
+
+  %tmp126 = getelementptr inbounds i32, i32* %p.0, i64 %sub.7
+  %l.7 = load i32, i32* %tmp126, align 4
+
+  %tmp130 = getelementptr inbounds i16, i16* %p.1, i64 %idx.7
+  %tmp131 = load i16, i16* %tmp130, align 2
+  %tmp132 = zext i16 %tmp131 to i64
+  %tmp133 = getelementptr inbounds i16, i16* %p.2, i64 %idx.7
+  %tmp134 = load i16, i16* %tmp133, align 2
+  %tmp135 = zext i16 %tmp134 to i64
+  %sub.8 = sub nsw i64 %tmp132, %tmp135
+
+  %tmp137 = getelementptr inbounds i32, i32* %p.0, i64 %sub.8
+  %l.8 = load i32, i32* %tmp137, align 4
+
+  call void @use(i32 %l.1, i32 %l.2, i32 %l.3, i32 %l.4, i32 %l.5, i32 %l.6, i32 %l.7, i32 %l.8)
+  ret void
+}
+
+declare void @use(i32, i32, i32, i32, i32, i32, i32, i32)


        


More information about the llvm-commits mailing list