[llvm] bd310a5 - [Hexagon] Remove -opaque-pointers=0 from tests

Krzysztof Parzyszek via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 5 07:28:53 PDT 2023


Author: Krzysztof Parzyszek
Date: 2023-04-05T07:28:33-07:00
New Revision: bd310a5ccabbc93d029196d1972a8a25bd93d844

URL: https://github.com/llvm/llvm-project/commit/bd310a5ccabbc93d029196d1972a8a25bd93d844
DIFF: https://github.com/llvm/llvm-project/commit/bd310a5ccabbc93d029196d1972a8a25bd93d844.diff

LOG: [Hexagon] Remove -opaque-pointers=0 from tests

Two tests still had opaque-pointers=0:
(1) llvm/test/CodeGen/Hexagon/addrmode.ll
(2) llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll

Deleted (1) since it no longer exercised the original scenario, and
modified (2) to reflect codegen changes.

This fixes https://github.com/llvm/llvm-project/issues/61928
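
For context: with opaque pointers, typed pointers such as i8* or
<16 x i32>* all become plain ptr, and pointer-to-pointer bitcasts
become no-ops, which is why the updated test below drops its bitcast
plumbing. A minimal illustrative sketch (not taken from either test):

  ; Typed pointers (what -opaque-pointers=0 preserved):
  %p = bitcast i8* %base to <16 x i32>*
  %v = load <16 x i32>, <16 x i32>* %p, align 64

  ; Opaque pointers: the bitcast is a no-op and disappears:
  %v = load <16 x i32>, ptr %base, align 64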

Added: 
    

Modified: 
    llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll

Removed: 
    llvm/test/CodeGen/Hexagon/addrmode.ll


################################################################################
diff --git a/llvm/test/CodeGen/Hexagon/addrmode.ll b/llvm/test/CodeGen/Hexagon/addrmode.ll
deleted file mode 100644
index aa7dbca73fbb1..0000000000000
--- a/llvm/test/CodeGen/Hexagon/addrmode.ll
+++ /dev/null
@@ -1,107 +0,0 @@
-; RUN: llc -opaque-pointers=0 -O3 -march=hexagon < %s | FileCheck %s
-
-; CHECK-NOT: memb(r{{[0-9]+}}+#375) = #4
-; CHECK: [[REG0:(r[0-9]+)]] = add(r{{[0-9]+}},{{#?}}#374)
-; CHECK: memb([[REG0]]+#1) = #4
-
-%s.0 = type { %s.1, %s.2*, %s.2*, %s.3, %s.5, i32, i32, i16, i8, i8, i8, [7 x i8], i16, i8, i8, i16, i8, i8, i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i16, i16, i16, [14 x i8], %s.6, i8, i8, %s.8, [2 x [16 x %s.9]], i32 (i8*, i8*, i8*, i8*, i8*)*, [80 x i8], i8, i8, i8*, i8*, i8*, i32*, i8*, i8*, i8*, [4 x i8], i8*, i8*, i8*, i8*, i8*, i8*, %s.18*, %s.18*, %s.6*, [4 x i8], [2 x [80 x [8 x i8]]], [56 x i8], [2 x [81 x %s.10]], [2 x %s.10], %s.10*, %s.10*, i32, [32 x i32], i8*, %s.12*, i8, i8, %s.18, i64*, i32, %s.19, %s.20, %s.21*, i8, [19 x i8] }
-%s.1 = type { i32, i32, i8* }
-%s.2 = type { i8, i8 }
-%s.3 = type { [371 x %s.2], [6 x %s.4] }
-%s.4 = type { %s.2*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
-%s.5 = type { [12 x %s.2], [4 x %s.2], [2 x %s.2], [4 x %s.2], [6 x %s.2], [2 x [7 x %s.2]], [4 x %s.2], [3 x [4 x %s.2]], [3 x %s.2], [3 x %s.2] }
-%s.6 = type { i8*, i32, %s.7, i8*, i8*, i32 }
-%s.7 = type { i64 }
-%s.8 = type { i8, i8, i8, i8, i8, i8, i8, i8, i32, i8, i8, [2 x i8], [16 x i8], [4 x i8], [32 x i16], [32 x i16], [4 x i8], [2 x [4 x i8]], [2 x [4 x i8]], i32, i32, i16, i8 }
-%s.9 = type { [2 x i16] }
-%s.10 = type { %s.11, [2 x [4 x %s.9]], [2 x [2 x i8]], [2 x i8] }
-%s.11 = type { i8, i8, i8, i8, i8, i8, i8, i8, i32 }
-%s.12 = type { i8*, i8*, i32, i8*, i16*, i8*, i16*, i8*, i32, i16, i8, i32, i16*, i16*, i16, i16, i16, i8, i8, %s.13, i8, i8, i8, [32 x i8*], %s.14, %s.16, i8, i8, i8, i8 }
-%s.13 = type { [6 x [16 x i8]], [2 x [64 x i8]] }
-%s.14 = type { i32, i32, %s.15* }
-%s.15 = type { i16, i16 }
-%s.16 = type { %s.17 }
-%s.17 = type { i32, i32, i32 }
-%s.18 = type { i16*, i16*, i16*, i16*, i16*, i32, i32 }
-%s.19 = type { i32, i32, i32, i32 }
-%s.20 = type { i32, i32, i32 }
-%s.21 = type { %s.22*, i8, i8, i8*, i8*, i8*, i8*, i16, i8, void (%s.21*, i8, i8*, i8*, %s.25*, i32*)*, i8, i8, i8, i16, i8, i16, i8, i8, i8*, [4 x i8], i8, i8, [2 x i8], [2 x [4 x i8]], [2 x i16*], i8, i8, i8, i8, i16, i16, i16, i16, i16, i32, [4 x i8], [2 x %s.35], [2 x %s.35], [2 x [10 x %s.30]], %s.35*, %s.35*, %s.35*, %s.35*, [2 x %s.30*], [2 x %s.30*], [2 x %s.30*], [2 x %s.30*], %s.35, [2 x [16 x %s.30]], [2 x [5 x %s.30]], %s.37*, [4 x i8], %s.37, i8, i8, [6 x i8] }
-%s.22 = type { void (%s.21*, %s.23*)*, %s.27*, %s.28, %s.32, [4 x i8], [2 x [81 x %s.34]], [52 x i8], [52 x i8] }
-%s.23 = type { i16, i16, i8, [64 x %s.24], i8, i8, %s.26, [2 x i8], [4 x [2 x [4 x i16]]], [4 x [2 x [4 x i8]]], [32 x i8*], [32 x i8*] }
-%s.24 = type { %s.25, i8, i8, i8, i8, i8, i8, i8, i16 }
-%s.25 = type { i32 }
-%s.26 = type { i8, i8, i8, [2 x [3 x [32 x i16]]], [2 x [3 x [32 x i8]]] }
-%s.27 = type { i16, i16, i32, i8, [3 x i8], %s.13, i8, i8, [2 x i8], [1280 x i8], [765 x i8], [3 x i8], [2 x [640 x i8]], [2 x [720 x i8]], [80 x i8], [45 x i8], [45 x i8], [45 x i8] }
-%s.28 = type { i8, i8, i8, i32, i32, i32, i32, i8, i8, i8, i8, i8, i8, i8, i8, i8, %s.13*, i8, i8, i16*, i8, i8, i8, i8*, %s.29*, %s.31* }
-%s.29 = type { i8, %s.30*, i8* }
-%s.30 = type { %s.25, i8, i8, i8, i8 }
-%s.31 = type { i8, i8, i8, i8, %s.31**, i8, i8, i8, i8, i8, i8, i8, %s.31**, i8, %s.31**, i8*, i8*, i32, i32, i32, i32, i32, [2 x i8], [2 x i8], i32, i32, i8, i8, i32, %s.31*, %s.29*, i8, i8, i32, i32, i32, i32, i32, i32, [2 x i32], [2 x i64], [2 x i32], [2 x i32], [2 x i32], [2 x i32], [2 x i64] }
-%s.32 = type { i8, i8, i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %s.26, %s.6*, [32 x %s.33], %s.33, [32 x %s.33], %s.29*, i8, [2 x [32 x i8]], [32 x i8*], [32 x i8*], [2 x [32 x i8]], [72 x i8], [72 x i32], [72 x i32], [72 x i32], [3 x [2 x [32 x [32 x i16]]]] }
-%s.33 = type { i32, i32, i32, i8, i8 }
-%s.34 = type { %s.35, [2 x [4 x %s.30]] }
-%s.35 = type { i32, i16, %s.36, i8, [3 x i8], i32 }
-%s.36 = type { i16 }
-%s.37 = type { i8, [1 x %s.38], [1 x [416 x i16]], %s.40, %s.38*, %s.38*, i16*, [4 x i8], i16*, %s.40*, %s.40*, %s.40*, %s.27*, [4 x i8], %s.42, %s.23, %s.43, i8 }
-%s.38 = type { %s.39, %s.39, %s.39 }
-%s.39 = type { i8*, i16, i16, i16 }
-%s.40 = type { %s.41, %s.41, %s.41, i8 }
-%s.41 = type { i8*, i16, i16, i16 }
-%s.42 = type { [32 x i8], [3 x i8], [3 x i8], [3 x i8], [3 x i8], [3 x i8], [3 x i8], i8, i8, [4 x i8] }
-%s.43 = type { i32, i32, i32, i32, i32, [3 x i8], [3 x i8], [3 x i8], [16 x i8], i8, i8, i8, i8, i32, i32, i16, i16* }
-%s.44 = type { i8, i8 }
-
-; Function Attrs: nounwind
-define i32 @f0(%s.0* %a0, %s.21* %a1, i8 zeroext %a2, %s.18* %a3, %s.20* %a4) local_unnamed_addr #0 {
-b0:
-  %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 39, i32 2
-  %v1 = load i8, i8* %v0, align 2
-  %v2 = getelementptr inbounds %s.21, %s.21* %a1, i32 0, i32 47, i32 2
-  %v3 = bitcast %s.36* %v2 to %s.44*
-  %v4 = getelementptr inbounds %s.44, %s.44* %v3, i32 0, i32 1
-  store i8 %v1, i8* %v4, align 1
-  %v5 = getelementptr inbounds %s.21, %s.21* %a1, i32 0, i32 32
-  %v6 = getelementptr inbounds %s.21, %s.21* %a1, i32 0, i32 16
-  switch i8 %v1, label %b5 [
-    i8 1, label %b1
-    i8 0, label %b2
-  ]
-
-b1:                                               ; preds = %b0
-  %v7 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 39, i32 10
-  %v8 = load i8, i8* %v7, align 1
-  %v9 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 39, i32 3
-  %v10 = load i8, i8* %v9, align 1
-  store i8 %v10, i8* %v6, align 2
-  %v11 = getelementptr inbounds %s.21, %s.21* %a1, i32 0, i32 19
-  %v12 = bitcast [4 x i8]* %v11 to i32*
-  store i32 16843009, i32* %v12, align 8
-  %v13 = icmp eq i8 %v10, 15
-  switch i8 %v1, label %b4 [
-    i8 6, label %b3
-    i8 1, label %b3
-  ]
-
-b2:                                               ; preds = %b0
-  store i8 4, i8* %v4, align 1
-  store i8 0, i8* %v6, align 2
-  switch i8 %v1, label %b4 [
-    i8 6, label %b3
-    i8 1, label %b3
-  ]
-
-b3:                                               ; preds = %b2, %b2, %b1, %b1
-  %v14 = tail call fastcc signext i8 @f1(%s.21* nonnull %a1)
-  unreachable
-
-b4:                                               ; preds = %b2, %b1
-  unreachable
-
-b5:                                               ; preds = %b0
-  unreachable
-}
-
-; Function Attrs: norecurse nounwind
-declare i8 @f1(%s.21* nocapture) unnamed_addr #1
-
-attributes #0 = { nounwind "target-cpu"="hexagonv65" }
-attributes #1 = { norecurse nounwind "target-cpu"="hexagonv65" }

diff --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll
index d73a922bd48bb..8d7958e4747d0 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll
@@ -1,4 +1,4 @@
-; RUN: llc -opaque-pointers=0 -march=hexagon -O2 -enable-pipeliner -disable-block-placement=0 < %s  | FileCheck %s
+; RUN: llc -march=hexagon -O2 -enable-pipeliner -disable-block-placement=0 < %s  | FileCheck %s
 
 ; For the Phis generated in the epilog, test that we generate the correct
 ; names for the values coming from the prolog stages. The test belows
@@ -11,33 +11,27 @@
 ; CHECK: if ({{.*}}) {{jump|jump:nt|jump:t}} [[EPLOG:(.*)]]
 ; CHECK: [[EPLOG]]:
 ; CHECK: [[VREG1:v([0-9]+)]] = [[VREG]]
-; CHECK: [[VREG]] = v{{[0-9]+}}
 ; CHECK: [[EPLOG1]]:
-; CHECK: = vlalign([[VREG]],[[VREG1]],#1)
+; CHECK: [[VREG2:v[0-9]+]] = [[VREG1]]
+; CHECK: = vlalign([[VREG1]],[[VREG2]],#1)
 
 ; Function Attrs: nounwind
-define void @f0(i8* noalias nocapture readonly %a0, i32 %a1, i32 %a2, i8* noalias nocapture readonly %a3, i32 %a4, i8* noalias nocapture %a5, i32 %a6) #0 {
+define void @f0(ptr noalias nocapture readonly %a0, i32 %a1, i32 %a2, ptr noalias nocapture readonly %a3, i32 %a4, ptr noalias nocapture %a5, i32 %a6) #0 {
 b0:
   %v0 = sub i32 0, %a1
-  %v1 = getelementptr inbounds i8, i8* %a0, i32 %v0
-  %v2 = bitcast i8* %v1 to <16 x i32>*
-  %v3 = bitcast i8* %a0 to <16 x i32>*
-  %v4 = getelementptr inbounds i8, i8* %a0, i32 %a1
-  %v5 = bitcast i8* %v4 to <16 x i32>*
+  %v1 = getelementptr inbounds i8, ptr %a0, i32 %v0
+  %v4 = getelementptr inbounds i8, ptr %a0, i32 %a1
   %v6 = mul nsw i32 %a1, 2
-  %v7 = getelementptr inbounds i8, i8* %a0, i32 %v6
-  %v8 = bitcast i8* %v7 to <16 x i32>*
-  %v9 = bitcast i8* %a5 to <16 x i32>*
-  %v10 = getelementptr inbounds i8, i8* %a5, i32 %a6
-  %v11 = bitcast i8* %v10 to <16 x i32>*
+  %v7 = getelementptr inbounds i8, ptr %a0, i32 %v6
+  %v10 = getelementptr inbounds i8, ptr %a5, i32 %a6
   %v12 = tail call <16 x i32> @llvm.hexagon.V6.vd0()
-  %v13 = load <16 x i32>, <16 x i32>* %v2, align 64, !tbaa !0
-  %v14 = load <16 x i32>, <16 x i32>* %v3, align 64, !tbaa !0
-  %v15 = load <16 x i32>, <16 x i32>* %v5, align 64, !tbaa !0
-  %v16 = load <16 x i32>, <16 x i32>* %v8, align 64, !tbaa !0
-  %v17 = load i8, i8* %a3, align 1, !tbaa !0
-  %v18 = getelementptr inbounds i8, i8* %a3, i32 1
-  %v19 = load i8, i8* %v18, align 1, !tbaa !0
+  %v13 = load <16 x i32>, ptr %v1, align 64, !tbaa !0
+  %v14 = load <16 x i32>, ptr %a0, align 64, !tbaa !0
+  %v15 = load <16 x i32>, ptr %v4, align 64, !tbaa !0
+  %v16 = load <16 x i32>, ptr %v7, align 64, !tbaa !0
+  %v17 = load i8, ptr %a3, align 1, !tbaa !0
+  %v18 = getelementptr inbounds i8, ptr %a3, i32 1
+  %v19 = load i8, ptr %v18, align 1, !tbaa !0
   %v20 = zext i8 %v19 to i64
   %v21 = shl nuw nsw i64 %v20, 24
   %v22 = zext i8 %v17 to i64
@@ -47,10 +41,10 @@ b0:
   %v26 = or i64 %v21, %v25
   %v27 = or i64 %v24, %v26
   %v28 = trunc i64 %v27 to i32
-  %v29 = getelementptr inbounds i8, i8* %a3, i32 3
-  %v30 = load i8, i8* %v29, align 1, !tbaa !0
-  %v31 = getelementptr inbounds i8, i8* %a3, i32 4
-  %v32 = load i8, i8* %v31, align 1, !tbaa !0
+  %v29 = getelementptr inbounds i8, ptr %a3, i32 3
+  %v30 = load i8, ptr %v29, align 1, !tbaa !0
+  %v31 = getelementptr inbounds i8, ptr %a3, i32 4
+  %v32 = load i8, ptr %v31, align 1, !tbaa !0
   %v33 = zext i8 %v32 to i64
   %v34 = shl nuw nsw i64 %v33, 24
   %v35 = zext i8 %v30 to i64
@@ -60,10 +54,10 @@ b0:
   %v39 = or i64 %v34, %v38
   %v40 = or i64 %v37, %v39
   %v41 = trunc i64 %v40 to i32
-  %v42 = getelementptr inbounds i8, i8* %a3, i32 6
-  %v43 = load i8, i8* %v42, align 1, !tbaa !0
-  %v44 = getelementptr inbounds i8, i8* %a3, i32 7
-  %v45 = load i8, i8* %v44, align 1, !tbaa !0
+  %v42 = getelementptr inbounds i8, ptr %a3, i32 6
+  %v43 = load i8, ptr %v42, align 1, !tbaa !0
+  %v44 = getelementptr inbounds i8, ptr %a3, i32 7
+  %v45 = load i8, ptr %v44, align 1, !tbaa !0
   %v46 = zext i8 %v45 to i64
   %v47 = shl nuw nsw i64 %v46, 24
   %v48 = zext i8 %v43 to i64
@@ -73,10 +67,10 @@ b0:
   %v52 = or i64 %v47, %v51
   %v53 = or i64 %v50, %v52
   %v54 = trunc i64 %v53 to i32
-  %v55 = getelementptr inbounds i8, i8* %a3, i32 5
-  %v56 = load i8, i8* %v55, align 1, !tbaa !0
-  %v57 = getelementptr inbounds i8, i8* %a3, i32 2
-  %v58 = load i8, i8* %v57, align 1, !tbaa !0
+  %v55 = getelementptr inbounds i8, ptr %a3, i32 5
+  %v56 = load i8, ptr %v55, align 1, !tbaa !0
+  %v57 = getelementptr inbounds i8, ptr %a3, i32 2
+  %v58 = load i8, ptr %v57, align 1, !tbaa !0
   %v59 = zext i8 %v58 to i64
   %v60 = shl nuw nsw i64 %v59, 24
   %v61 = zext i8 %v56 to i64
@@ -86,8 +80,8 @@ b0:
   %v65 = or i64 %v60, %v64
   %v66 = or i64 %v63, %v65
   %v67 = trunc i64 %v66 to i32
-  %v68 = getelementptr inbounds i8, i8* %a3, i32 8
-  %v69 = load i8, i8* %v68, align 1, !tbaa !0
+  %v68 = getelementptr inbounds i8, ptr %a3, i32 8
+  %v69 = load i8, ptr %v68, align 1, !tbaa !0
   %v70 = zext i8 %v69 to i64
   %v71 = shl nuw nsw i64 %v70, 24
   %v72 = shl nuw nsw i64 %v70, 16
@@ -101,24 +95,21 @@ b0:
 
 b1:                                               ; preds = %b0
   %v79 = add i32 %v6, 64
-  %v80 = getelementptr inbounds i8, i8* %a0, i32 %v79
-  %v81 = bitcast i8* %v80 to <16 x i32>*
+  %v80 = getelementptr inbounds i8, ptr %a0, i32 %v79
   %v82 = add i32 %a1, 64
-  %v83 = getelementptr inbounds i8, i8* %a0, i32 %v82
-  %v84 = bitcast i8* %v83 to <16 x i32>*
-  %v85 = getelementptr inbounds i8, i8* %a0, i32 64
-  %v86 = bitcast i8* %v85 to <16 x i32>*
+  %v83 = getelementptr inbounds i8, ptr %a0, i32 %v82
+  %v85 = getelementptr inbounds i8, ptr %a0, i32 64
+  %v86 = bitcast ptr %v85 to ptr
   %v87 = sub i32 64, %a1
-  %v88 = getelementptr inbounds i8, i8* %a0, i32 %v87
-  %v89 = bitcast i8* %v88 to <16 x i32>*
+  %v88 = getelementptr inbounds i8, ptr %a0, i32 %v87
   %v90 = add i32 %a2, -65
   %v91 = lshr i32 %v90, 6
   %v92 = mul i32 %v91, 64
   %v93 = add i32 %v92, %a6
   %v94 = add i32 %v93, 64
-  %v95 = getelementptr i8, i8* %a5, i32 %v94
+  %v95 = getelementptr i8, ptr %a5, i32 %v94
   %v96 = add i32 %v92, 64
-  %v97 = getelementptr i8, i8* %a5, i32 %v96
+  %v97 = getelementptr i8, ptr %a5, i32 %v96
   br label %b2
 
 b2:                                               ; preds = %b2, %b1
@@ -131,24 +122,24 @@ b2:                                               ; preds = %b2, %b1
   %v104 = phi <16 x i32> [ %v15, %b1 ], [ %v122, %b2 ]
   %v105 = phi <16 x i32> [ %v12, %b1 ], [ %v106, %b2 ]
   %v106 = phi <16 x i32> [ %v16, %b1 ], [ %v124, %b2 ]
-  %v107 = phi <16 x i32>* [ %v89, %b1 ], [ %v117, %b2 ]
-  %v108 = phi <16 x i32>* [ %v86, %b1 ], [ %v119, %b2 ]
-  %v109 = phi <16 x i32>* [ %v84, %b1 ], [ %v121, %b2 ]
-  %v110 = phi <16 x i32>* [ %v81, %b1 ], [ %v123, %b2 ]
-  %v111 = phi <16 x i32>* [ %v9, %b1 ], [ %v148, %b2 ]
-  %v112 = phi <16 x i32>* [ %v11, %b1 ], [ %v152, %b2 ]
+  %v107 = phi ptr [ %v88, %b1 ], [ %v117, %b2 ]
+  %v108 = phi ptr [ %v86, %b1 ], [ %v119, %b2 ]
+  %v109 = phi ptr [ %v83, %b1 ], [ %v121, %b2 ]
+  %v110 = phi ptr [ %v80, %b1 ], [ %v123, %b2 ]
+  %v111 = phi ptr [ %a5, %b1 ], [ %v148, %b2 ]
+  %v112 = phi ptr [ %v10, %b1 ], [ %v152, %b2 ]
   %v113 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v100, <16 x i32> %v99, i32 1)
   %v114 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v102, <16 x i32> %v101, i32 1)
   %v115 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v104, <16 x i32> %v103, i32 1)
   %v116 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v106, <16 x i32> %v105, i32 1)
-  %v117 = getelementptr inbounds <16 x i32>, <16 x i32>* %v107, i32 1
-  %v118 = load <16 x i32>, <16 x i32>* %v107, align 64, !tbaa !0
-  %v119 = getelementptr inbounds <16 x i32>, <16 x i32>* %v108, i32 1
-  %v120 = load <16 x i32>, <16 x i32>* %v108, align 64, !tbaa !0
-  %v121 = getelementptr inbounds <16 x i32>, <16 x i32>* %v109, i32 1
-  %v122 = load <16 x i32>, <16 x i32>* %v109, align 64, !tbaa !0
-  %v123 = getelementptr inbounds <16 x i32>, <16 x i32>* %v110, i32 1
-  %v124 = load <16 x i32>, <16 x i32>* %v110, align 64, !tbaa !0
+  %v117 = getelementptr inbounds <16 x i32>, ptr %v107, i32 1
+  %v118 = load <16 x i32>, ptr %v107, align 64, !tbaa !0
+  %v119 = getelementptr inbounds <16 x i32>, ptr %v108, i32 1
+  %v120 = load <16 x i32>, ptr %v108, align 64, !tbaa !0
+  %v121 = getelementptr inbounds <16 x i32>, ptr %v109, i32 1
+  %v122 = load <16 x i32>, ptr %v109, align 64, !tbaa !0
+  %v123 = getelementptr inbounds <16 x i32>, ptr %v110, i32 1
+  %v124 = load <16 x i32>, ptr %v110, align 64, !tbaa !0
   %v125 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v118, <16 x i32> %v100, i32 1)
   %v126 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v120, <16 x i32> %v102, i32 1)
   %v127 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v122, <16 x i32> %v104, i32 1)
@@ -172,20 +163,18 @@ b2:                                               ; preds = %b2, %b1
   %v145 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v143)
   %v146 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v143)
   %v147 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v145, <16 x i32> %v146, i32 %a4)
-  %v148 = getelementptr inbounds <16 x i32>, <16 x i32>* %v111, i32 1
-  store <16 x i32> %v147, <16 x i32>* %v111, align 64, !tbaa !0
+  %v148 = getelementptr inbounds <16 x i32>, ptr %v111, i32 1
+  store <16 x i32> %v147, ptr %v111, align 64, !tbaa !0
   %v149 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v144)
   %v150 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v144)
   %v151 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v149, <16 x i32> %v150, i32 %a4)
-  %v152 = getelementptr inbounds <16 x i32>, <16 x i32>* %v112, i32 1
-  store <16 x i32> %v151, <16 x i32>* %v112, align 64, !tbaa !0
+  %v152 = getelementptr inbounds <16 x i32>, ptr %v112, i32 1
+  store <16 x i32> %v151, ptr %v112, align 64, !tbaa !0
   %v153 = add nsw i32 %v98, -64
   %v154 = icmp sgt i32 %v153, 64
   br i1 %v154, label %b2, label %b3
 
 b3:                                               ; preds = %b2
-  %v155 = bitcast i8* %v95 to <16 x i32>*
-  %v156 = bitcast i8* %v97 to <16 x i32>*
   br label %b4
 
 b4:                                               ; preds = %b3, %b0
@@ -197,8 +186,8 @@ b4:                                               ; preds = %b3, %b0
   %v162 = phi <16 x i32> [ %v122, %b3 ], [ %v15, %b0 ]
   %v163 = phi <16 x i32> [ %v106, %b3 ], [ %v12, %b0 ]
   %v164 = phi <16 x i32> [ %v124, %b3 ], [ %v16, %b0 ]
-  %v165 = phi <16 x i32>* [ %v156, %b3 ], [ %v9, %b0 ]
-  %v166 = phi <16 x i32>* [ %v155, %b3 ], [ %v11, %b0 ]
+  %v165 = phi ptr [ %v97, %b3 ], [ %a5, %b0 ]
+  %v166 = phi ptr [ %v95, %b3 ], [ %v10, %b0 ]
   %v167 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v158, <16 x i32> %v157, i32 1)
   %v168 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v160, <16 x i32> %v159, i32 1)
   %v169 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v162, <16 x i32> %v161, i32 1)
@@ -226,11 +215,11 @@ b4:                                               ; preds = %b3, %b0
   %v191 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v189)
   %v192 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v189)
   %v193 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v191, <16 x i32> %v192, i32 %a4)
-  store <16 x i32> %v193, <16 x i32>* %v165, align 64, !tbaa !0
+  store <16 x i32> %v193, ptr %v165, align 64, !tbaa !0
   %v194 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v190)
   %v195 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v190)
   %v196 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v194, <16 x i32> %v195, i32 %a4)
-  store <16 x i32> %v196, <16 x i32>* %v166, align 64, !tbaa !0
+  store <16 x i32> %v196, ptr %v166, align 64, !tbaa !0
   ret void
 }
 
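A note on the FileCheck idiom in the updated checks (standard FileCheck
semantics): [[VREG2:v[0-9]+]] captures whatever register name the line
matches, and a later bare [[VREG2]] requires that exact same register,
so the new checks tie the epilog copy to the vlalign operand. A small
illustrative sketch, not taken from the test:

  ; CHECK: [[R:v[0-9]+]] = vmem(r{{[0-9]+}}+#0)   <- captures a vector register
  ; CHECK: = vlalign([[R]],v{{[0-9]+}},#1)        <- the same register must appear here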