[llvm] r328561 - [Hexagon] Add more lit tests

Krzysztof Parzyszek via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 26 10:53:48 PDT 2018


Author: kparzysz
Date: Mon Mar 26 10:53:48 2018
New Revision: 328561

URL: http://llvm.org/viewvc/llvm-project?rev=328561&view=rev
Log:
[Hexagon] Add more lit tests

Added:
    llvm/trunk/test/CodeGen/Hexagon/addrmode-align.ll
    llvm/trunk/test/CodeGen/Hexagon/intrinsics/atomicrmw_bitwise_native.ll
    llvm/trunk/test/CodeGen/Hexagon/intrinsics/atomicrmw_nand.ll
    llvm/trunk/test/CodeGen/Hexagon/lsr-post-inc-cross-use-offsets.ll
    llvm/trunk/test/CodeGen/Hexagon/retval-redundant-copy.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-bad-sched.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-const-tc1.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi7.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-memrefs-epilog1.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-phi-ch-offset.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-resmii-1.ll
    llvm/trunk/test/CodeGen/Hexagon/vdotprod.ll

Added: llvm/trunk/test/CodeGen/Hexagon/addrmode-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/addrmode-align.ll?rev=328561&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/addrmode-align.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/addrmode-align.ll Mon Mar 26 10:53:48 2018
@@ -0,0 +1,61 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; CHECK: [[REG0:(r[0-9]+)]] = add(r29
+; CHECK: [[REG1:(r[0-9]+)]] = add([[REG0]],#4)
+; CHECK-DAG: memd([[REG1]]+#8) =
+; CHECK-DAG: memd([[REG1]]+#0) =
+
+%s.0 = type { i32, i8, double, i32, float }
+
+@g0 = external local_unnamed_addr global i32, align 4
+
+define i32 @f0() local_unnamed_addr {
+b0:
+  %v0 = alloca [10 x %s.0], align 8
+  %v1 = bitcast [10 x %s.0]* %v0 to i8*
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v2 = phi i32 [ 0, %b0 ], [ %v6, %b1 ]
+  %v3 = getelementptr inbounds [10 x %s.0], [10 x %s.0]* %v0, i32 0, i32 %v2, i32 0
+  store i32 0, i32* %v3, align 8
+  %v4 = getelementptr inbounds [10 x %s.0], [10 x %s.0]* %v0, i32 0, i32 %v2, i32 1
+  store i8 0, i8* %v4, align 4
+  %v5 = getelementptr inbounds [10 x %s.0], [10 x %s.0]* %v0, i32 0, i32 %v2, i32 2
+  %v6 = add nuw nsw i32 %v2, 1
+  %v7 = icmp eq i32 %v6, 10
+  %v8 = bitcast double* %v5 to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 8 %v8, i8 0, i64 16, i1 false)
+  br i1 %v7, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v9 = phi i32 [ 0, %b2 ], [ %v10, %b3 ]
+  %v10 = add nuw nsw i32 %v9, 1
+  %v11 = icmp eq i32 %v10, 10
+  br i1 %v11, label %b4, label %b3
+
+b4:                                               ; preds = %b3
+  %v12 = getelementptr inbounds [10 x %s.0], [10 x %s.0]* %v0, i32 0, i32 0, i32 0
+  %v13 = load i32, i32* %v12, align 8
+  %v14 = sub nsw i32 1122, %v13
+  %v15 = icmp eq i32 %v14, 1121
+  br i1 %v15, label %b6, label %b5
+
+b5:                                               ; preds = %b4
+  store i32 1, i32* @g0, align 4
+  br label %b6
+
+b6:                                               ; preds = %b5, %b4
+  tail call void @f1()
+  unreachable
+}
+
+declare void @f1() local_unnamed_addr
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #0
+
+attributes #0 = { argmemonly nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/intrinsics/atomicrmw_bitwise_native.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/intrinsics/atomicrmw_bitwise_native.ll?rev=328561&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/intrinsics/atomicrmw_bitwise_native.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/intrinsics/atomicrmw_bitwise_native.ll Mon Mar 26 10:53:48 2018
@@ -0,0 +1,65 @@
+; RUN: sed -e "s/ORDER/monotonic/" -e "s/BINARY_OP/and/"  %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/acquire/"   -e "s/BINARY_OP/and/"  %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/release/"   -e "s/BINARY_OP/and/"  %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/acq_rel/"   -e "s/BINARY_OP/and/"  %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/seq_cst/"   -e "s/BINARY_OP/and/"  %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/monotonic/" -e "s/BINARY_OP/xor/"  %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/acquire/"   -e "s/BINARY_OP/xor/"  %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/release/"   -e "s/BINARY_OP/xor/"  %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/acq_rel/"   -e "s/BINARY_OP/xor/"  %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/seq_cst/"   -e "s/BINARY_OP/xor/"  %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/monotonic/" -e "s/BINARY_OP/or/"   %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/acquire/"   -e "s/BINARY_OP/or/"   %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/release/"   -e "s/BINARY_OP/or/"   %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/acq_rel/"   -e "s/BINARY_OP/or/"   %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/seq_cst/"   -e "s/BINARY_OP/or/"   %s | llc -march=hexagon | FileCheck %s
+
+@g0 = global i32 0, align 4
+@g1 = global i32 0, align 4
+@g2 = global i32 0, align 4
+@g3 = global i64 0, align 8
+@g4 = global i64 0, align 8
+@g5 = global i64 0, align 8
+
+; CHECK-LABEL: f0:
+; CHECK: // %[[BINARY_OP:[a-z_]*]]_entry
+; CHECK-DAG: [[SECOND_ADDR:r[0-9]+]] = ##g1
+; CHECK-DAG: [[FIRST_VALUE:r[0-9]+]] = memw(gp+#g0)
+
+; CHECK: [[FAIL_LABEL:\.LBB.*]]:
+
+; CHECK: [[LOCKED_READ_REG:r[0-9]+]] = memw_locked([[SECOND_ADDR]])
+; CHECK: [[RESULT_REG:r[0-9]+]] = [[BINARY_OP]]([[LOCKED_READ_REG]],[[FIRST_VALUE]])
+; CHECK: memw_locked([[SECOND_ADDR]],[[LOCK_PRED_REG:p[0-9]+]]) = [[RESULT_REG]]
+
+; CHECK: cmp.eq{{.*}}jump{{.*}}[[FAIL_LABEL]]
+; CHECK-DAG: memw(gp+#g2) = [[LOCKED_READ_REG]]
+; CHECK-DAG: jumpr r31
+define void @f0() {
+BINARY_OP_entry:
+  %v0 = load i32, i32* @g0, align 4
+  %v1 = atomicrmw BINARY_OP i32* @g1, i32 %v0 ORDER
+  store i32 %v1, i32* @g2, align 4
+  ret void
+}
+
+; CHECK-LABEL: f1:
+; CHECK-DAG: [[SECOND_ADDR:r[0-9]+]] = ##g4
+; CHECK-DAG: [[FIRST_VALUE:r[:0-9]+]] = memd(gp+#g3)
+
+; CHECK: [[FAIL_LABEL:\.LBB.*]]:
+
+; CHECK: [[LOCKED_READ_REG:r[:0-9]+]] = memd_locked([[SECOND_ADDR]])
+; CHECK: [[RESULT_REG:r[:0-9]+]] = [[BINARY_OP]]([[LOCKED_READ_REG]],[[FIRST_VALUE]])
+; CHECK: memd_locked([[SECOND_ADDR]],[[LOCK_PRED_REG:p[0-9]+]]) = [[RESULT_REG]]
+
+; CHECK: cmp.eq{{.*}}jump{{.*}}[[FAIL_LABEL]]
+; CHECK-DAG: memd(gp+#g5) = [[LOCKED_READ_REG]]
+; CHECK-DAG: jumpr r31
+define void @f1() {
+b0:
+  %v0 = load i64, i64* @g3, align 8
+  %v1 = atomicrmw BINARY_OP i64* @g4, i64 %v0 ORDER
+  store i64 %v1, i64* @g5, align 8
+  ret void
+}

Added: llvm/trunk/test/CodeGen/Hexagon/intrinsics/atomicrmw_nand.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/intrinsics/atomicrmw_nand.ll?rev=328561&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/intrinsics/atomicrmw_nand.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/intrinsics/atomicrmw_nand.ll Mon Mar 26 10:53:48 2018
@@ -0,0 +1,56 @@
+; RUN: sed -e "s/ORDER/monotonic/" %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/acquire/"   %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/release/"   %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/acq_rel/"   %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/seq_cst/"   %s | llc -march=hexagon | FileCheck %s
+
+@g0 = global i32 0, align 4
+@g1 = global i32 0, align 4
+@g2 = global i32 0, align 4
+@g3 = global i64 0, align 8
+@g4 = global i64 0, align 8
+@g5 = global i64 0, align 8
+
+; CHECK-LABEL: f0:
+; CHECK-DAG: [[SECOND_ADDR:r[0-9]+]] = ##g1
+; CHECK-DAG: [[FIRST_VALUE:r[0-9]+]] = memw(gp+#g0)
+
+; CHECK: [[FAIL_LABEL:\.LBB.*]]:
+
+; CHECK: [[LOCKED_READ_REG:r[0-9]+]] = memw_locked([[SECOND_ADDR]])
+; CHECK: [[AND_RESULT_REG:r[0-9]+]] = and([[LOCKED_READ_REG]],[[FIRST_VALUE]])
+; CHECK: [[RESULT_REG:r[0-9]+]] = sub(#-1,[[AND_RESULT_REG]])
+; CHECK: memw_locked([[SECOND_ADDR]],[[LOCK_PRED_REG:p[0-9]+]]) = [[RESULT_REG]]
+
+; CHECK: cmp.eq{{.*}}jump{{.*}}[[FAIL_LABEL]]
+; CHECK-DAG: memw(gp+#g2) = [[LOCKED_READ_REG]]
+; CHECK-DAG: jumpr r31
+define void @f0() {
+b0:
+  %v0 = load i32, i32* @g0, align 4
+  %v1 = atomicrmw nand i32* @g1, i32 %v0 ORDER
+  store i32 %v1, i32* @g2, align 4
+  ret void
+}
+
+; CHECK-LABEL: f1:
+; CHECK-DAG: [[SECOND_ADDR:r[0-9]+]] = ##g4
+; CHECK-DAG: [[FIRST_VALUE:r[:0-9]+]] = memd(gp+#g3)
+
+; CHECK: [[FAIL_LABEL:\.LBB.*]]:
+
+; CHECK: [[LOCKED_READ_REG:r[:0-9]+]] = memd_locked([[SECOND_ADDR]])
+; CHECK: [[AND_RESULT_REG:r[:0-9]+]] = and([[LOCKED_READ_REG]],[[FIRST_VALUE]])
+; CHECK: [[RESULT_REG:r[:0-9]+]] = not([[AND_RESULT_REG]])
+; CHECK: memd_locked([[SECOND_ADDR]],[[LOCK_PRED_REG:p[0-9]+]]) = [[RESULT_REG]]
+
+; CHECK: cmp.eq{{.*}}jump{{.*}}[[FAIL_LABEL]]
+; CHECK-DAG: memd(gp+#g5) = [[LOCKED_READ_REG]]
+; CHECK-DAG: jumpr r31
+define void @f1() {
+b0:
+  %v0 = load i64, i64* @g3, align 8
+  %v1 = atomicrmw nand i64* @g4, i64 %v0 ORDER
+  store i64 %v1, i64* @g5, align 8
+  ret void
+}

Added: llvm/trunk/test/CodeGen/Hexagon/lsr-post-inc-cross-use-offsets.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/lsr-post-inc-cross-use-offsets.ll?rev=328561&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/lsr-post-inc-cross-use-offsets.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/lsr-post-inc-cross-use-offsets.ll Mon Mar 26 10:53:48 2018
@@ -0,0 +1,352 @@
+; RUN: llc -O3 -march=hexagon < %s | FileCheck %s
+
+; CHECK: loop0(.[[BLOCK:LBB0_[0-9]+]]
+; CHECK: .[[BLOCK]]:
+; CHECK: = vmemu({{r[0-9]+}}++#1)
+; CHECK: = vmemu({{r[0-9]+}}++#1)
+; CHECK: = vmemu({{r[0-9]+}}++#1)
+; CHECK: = vmemu({{r[0-9]+}}++#1)
+; CHECK: endloop0
+
+target triple = "hexagon-unknown--elf"
+
+%0 = type { i8*, i32, i32, i32, i32, %1*, %1*, %1* }
+%1 = type { %2 }
+%2 = type { i64 }
+%3 = type { i8*, i32, i32, i32, i32, i32, i32, i8*, i32, i32* }
+%4 = type { i64, i8*, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
+
+@g0 = private unnamed_addr constant [5 x i8] c"Load\00", align 1
+@g1 = private unnamed_addr constant [6 x i8] c"Store\00", align 1
+@g2 = private unnamed_addr constant [18 x i8] c"Begin realization\00", align 1
+@g3 = private unnamed_addr constant [16 x i8] c"End realization\00", align 1
+@g4 = private unnamed_addr constant [8 x i8] c"Produce\00", align 1
+@g5 = private unnamed_addr constant [7 x i8] c"Update\00", align 1
+@g6 = private unnamed_addr constant [8 x i8] c"Consume\00", align 1
+@g7 = private unnamed_addr constant [12 x i8] c"End consume\00", align 1
+@g8 = private constant [6 x i8] c"input\00", align 32
+@g9 = private constant [10 x i8] c"dilate3x3\00", align 32
+@g10 = private constant [2 x %0] [%0 { i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g8, i32 0, i32 0), i32 1, i32 2, i32 1, i32 8, %1* null, %1* null, %1* null }, %0 { i8* getelementptr inbounds ([10 x i8], [10 x i8]* @g9, i32 0, i32 0), i32 2, i32 2, i32 1, i32 8, %1* null, %1* null, %1* null }]
+@g11 = private constant [64 x i8] c"...............................................................\00", align 32
+
+; Function Attrs: nounwind
+declare i8* @f0(i8*, i32) #0
+
+; Function Attrs: nounwind
+declare void @f1(i8*, i8*) #0
+
+; Function Attrs: nounwind
+declare void @f2(i8*, i8*) #0
+
+; Function Attrs: nounwind
+declare i32 @f3(i8*, %3*) #0
+
+; Function Attrs: nounwind
+declare void @f4() #0
+
+; Function Attrs: nounwind
+declare void @f5() #0
+
+; Function Attrs: nounwind
+define i32 @f6(%4* noalias nocapture readonly %a0, %4* noalias nocapture readonly %a1) #0 {
+b0:
+  %v0 = getelementptr inbounds %4, %4* %a0, i32 0, i32 1
+  %v1 = load i8*, i8** %v0, align 4
+  %v2 = getelementptr inbounds %4, %4* %a0, i32 0, i32 3, i32 1
+  %v3 = load i32, i32* %v2, align 4
+  %v4 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0
+  %v5 = load i32, i32* %v4, align 4
+  %v6 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 1
+  %v7 = load i32, i32* %v6, align 4
+  %v8 = getelementptr inbounds %4, %4* %a1, i32 0, i32 1
+  %v9 = load i8*, i8** %v8, align 4
+  %v10 = getelementptr inbounds %4, %4* %a1, i32 0, i32 2, i32 0
+  %v11 = load i32, i32* %v10, align 4
+  %v12 = getelementptr inbounds %4, %4* %a1, i32 0, i32 3, i32 1
+  %v13 = load i32, i32* %v12, align 4
+  %v14 = getelementptr inbounds %4, %4* %a1, i32 0, i32 4, i32 0
+  %v15 = load i32, i32* %v14, align 4
+  %v16 = getelementptr inbounds %4, %4* %a1, i32 0, i32 4, i32 1
+  %v17 = load i32, i32* %v16, align 4
+  %v18 = getelementptr inbounds %4, %4* %a1, i32 0, i32 2, i32 1
+  %v19 = load i32, i32* %v18, align 4
+  %v20 = add nsw i32 %v19, %v17
+  %v21 = icmp sgt i32 %v19, 0
+  br i1 %v21, label %b1, label %b11, !prof !3
+
+b1:                                               ; preds = %b0
+  %v22 = ashr i32 %v11, 7
+  %v23 = icmp slt i32 %v22, 0
+  %v24 = select i1 %v23, i32 0, i32 %v22
+  %v25 = icmp sgt i32 %v24, 0
+  br i1 %v25, label %b5, label %b7, !prof !3
+
+b2:                                               ; preds = %b5, %b2
+  %v26 = phi i32 [ %v90, %b2 ], [ 0, %b5 ]
+  %v27 = mul nsw i32 %v7, %v3
+  %v28 = add nsw i32 %v27, %v5
+  %v29 = shl nsw i32 %v26, 7
+  %v30 = add nsw i32 %v29, %v15
+  %v31 = add nsw i32 %v150, -1
+  %v32 = mul nsw i32 %v31, %v3
+  %v33 = mul nsw i32 %v150, %v3
+  %v34 = add nsw i32 %v150, 1
+  %v35 = mul nsw i32 %v34, %v3
+  %v36 = sub i32 %v32, %v28
+  %v37 = add i32 %v36, %v30
+  %v38 = add nsw i32 %v37, -1
+  %v39 = getelementptr inbounds i8, i8* %v1, i32 %v38
+  %v40 = bitcast i8* %v39 to <32 x i32>*
+  %v41 = load <32 x i32>, <32 x i32>* %v40, align 1, !tbaa !4
+  %v42 = getelementptr inbounds i8, i8* %v1, i32 %v37
+  %v43 = bitcast i8* %v42 to <32 x i32>*
+  %v44 = load <32 x i32>, <32 x i32>* %v43, align 1, !tbaa !4
+  %v45 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v41, <32 x i32> %v44)
+  %v46 = add nsw i32 %v37, 1
+  %v47 = getelementptr inbounds i8, i8* %v1, i32 %v46
+  %v48 = bitcast i8* %v47 to <32 x i32>*
+  %v49 = load <32 x i32>, <32 x i32>* %v48, align 1, !tbaa !4
+  %v50 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v45, <32 x i32> %v49)
+  %v51 = sub i32 %v33, %v28
+  %v52 = add i32 %v51, %v30
+  %v53 = add nsw i32 %v52, -1
+  %v54 = getelementptr inbounds i8, i8* %v1, i32 %v53
+  %v55 = bitcast i8* %v54 to <32 x i32>*
+  %v56 = load <32 x i32>, <32 x i32>* %v55, align 1, !tbaa !4
+  %v57 = getelementptr inbounds i8, i8* %v1, i32 %v52
+  %v58 = bitcast i8* %v57 to <32 x i32>*
+  %v59 = load <32 x i32>, <32 x i32>* %v58, align 1, !tbaa !4
+  %v60 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v56, <32 x i32> %v59)
+  %v61 = add nsw i32 %v52, 1
+  %v62 = getelementptr inbounds i8, i8* %v1, i32 %v61
+  %v63 = bitcast i8* %v62 to <32 x i32>*
+  %v64 = load <32 x i32>, <32 x i32>* %v63, align 1, !tbaa !4
+  %v65 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v60, <32 x i32> %v64)
+  %v66 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v50, <32 x i32> %v65)
+  %v67 = sub i32 %v35, %v28
+  %v68 = add i32 %v67, %v30
+  %v69 = add nsw i32 %v68, -1
+  %v70 = getelementptr inbounds i8, i8* %v1, i32 %v69
+  %v71 = bitcast i8* %v70 to <32 x i32>*
+  %v72 = load <32 x i32>, <32 x i32>* %v71, align 1, !tbaa !4
+  %v73 = getelementptr inbounds i8, i8* %v1, i32 %v68
+  %v74 = bitcast i8* %v73 to <32 x i32>*
+  %v75 = load <32 x i32>, <32 x i32>* %v74, align 1, !tbaa !4
+  %v76 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v72, <32 x i32> %v75)
+  %v77 = add nsw i32 %v68, 1
+  %v78 = getelementptr inbounds i8, i8* %v1, i32 %v77
+  %v79 = bitcast i8* %v78 to <32 x i32>*
+  %v80 = load <32 x i32>, <32 x i32>* %v79, align 1, !tbaa !4
+  %v81 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v76, <32 x i32> %v80)
+  %v82 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v66, <32 x i32> %v81)
+  %v83 = mul nsw i32 %v150, %v13
+  %v84 = mul nsw i32 %v17, %v13
+  %v85 = add i32 %v84, %v15
+  %v86 = sub i32 %v83, %v85
+  %v87 = add i32 %v86, %v30
+  %v88 = getelementptr inbounds i8, i8* %v9, i32 %v87
+  %v89 = bitcast i8* %v88 to <32 x i32>*
+  store <32 x i32> %v82, <32 x i32>* %v89, align 1, !tbaa !7
+  %v90 = add nuw nsw i32 %v26, 1
+  %v91 = icmp eq i32 %v90, %v24
+  br i1 %v91, label %b6, label %b2
+
+b3:                                               ; preds = %b6, %b3
+  %v92 = phi i32 [ %v147, %b3 ], [ %v24, %b6 ]
+  %v93 = add nsw i32 %v15, %v11
+  %v94 = sub i32 %v93, %v28
+  %v95 = add i32 %v94, %v32
+  %v96 = add nsw i32 %v95, -129
+  %v97 = getelementptr inbounds i8, i8* %v1, i32 %v96
+  %v98 = bitcast i8* %v97 to <32 x i32>*
+  %v99 = load <32 x i32>, <32 x i32>* %v98, align 1, !tbaa !4
+  %v100 = add nsw i32 %v95, -128
+  %v101 = getelementptr inbounds i8, i8* %v1, i32 %v100
+  %v102 = bitcast i8* %v101 to <32 x i32>*
+  %v103 = load <32 x i32>, <32 x i32>* %v102, align 1, !tbaa !4
+  %v104 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v99, <32 x i32> %v103)
+  %v105 = add nsw i32 %v95, -127
+  %v106 = getelementptr inbounds i8, i8* %v1, i32 %v105
+  %v107 = bitcast i8* %v106 to <32 x i32>*
+  %v108 = load <32 x i32>, <32 x i32>* %v107, align 1, !tbaa !4
+  %v109 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v104, <32 x i32> %v108)
+  %v110 = add i32 %v94, %v33
+  %v111 = add nsw i32 %v110, -129
+  %v112 = getelementptr inbounds i8, i8* %v1, i32 %v111
+  %v113 = bitcast i8* %v112 to <32 x i32>*
+  %v114 = load <32 x i32>, <32 x i32>* %v113, align 1, !tbaa !4
+  %v115 = add nsw i32 %v110, -128
+  %v116 = getelementptr inbounds i8, i8* %v1, i32 %v115
+  %v117 = bitcast i8* %v116 to <32 x i32>*
+  %v118 = load <32 x i32>, <32 x i32>* %v117, align 1, !tbaa !4
+  %v119 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v114, <32 x i32> %v118)
+  %v120 = add nsw i32 %v110, -127
+  %v121 = getelementptr inbounds i8, i8* %v1, i32 %v120
+  %v122 = bitcast i8* %v121 to <32 x i32>*
+  %v123 = load <32 x i32>, <32 x i32>* %v122, align 1, !tbaa !4
+  %v124 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v119, <32 x i32> %v123)
+  %v125 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v109, <32 x i32> %v124)
+  %v126 = add i32 %v94, %v35
+  %v127 = add nsw i32 %v126, -129
+  %v128 = getelementptr inbounds i8, i8* %v1, i32 %v127
+  %v129 = bitcast i8* %v128 to <32 x i32>*
+  %v130 = load <32 x i32>, <32 x i32>* %v129, align 1, !tbaa !4
+  %v131 = add nsw i32 %v126, -128
+  %v132 = getelementptr inbounds i8, i8* %v1, i32 %v131
+  %v133 = bitcast i8* %v132 to <32 x i32>*
+  %v134 = load <32 x i32>, <32 x i32>* %v133, align 1, !tbaa !4
+  %v135 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v130, <32 x i32> %v134)
+  %v136 = add nsw i32 %v126, -127
+  %v137 = getelementptr inbounds i8, i8* %v1, i32 %v136
+  %v138 = bitcast i8* %v137 to <32 x i32>*
+  %v139 = load <32 x i32>, <32 x i32>* %v138, align 1, !tbaa !4
+  %v140 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v135, <32 x i32> %v139)
+  %v141 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v125, <32 x i32> %v140)
+  %v142 = add i32 %v11, -128
+  %v143 = sub i32 %v142, %v84
+  %v144 = add i32 %v143, %v83
+  %v145 = getelementptr inbounds i8, i8* %v9, i32 %v144
+  %v146 = bitcast i8* %v145 to <32 x i32>*
+  store <32 x i32> %v141, <32 x i32>* %v146, align 1, !tbaa !7
+  %v147 = add nuw nsw i32 %v92, 1
+  %v148 = icmp eq i32 %v147, %v152
+  br i1 %v148, label %b4, label %b3
+
+b4:                                               ; preds = %b6, %b3
+  %v149 = icmp eq i32 %v34, %v20
+  br i1 %v149, label %b11, label %b5
+
+b5:                                               ; preds = %b4, %b1
+  %v150 = phi i32 [ %v34, %b4 ], [ %v17, %b1 ]
+  br label %b2
+
+b6:                                               ; preds = %b2
+  %v151 = add nsw i32 %v11, 127
+  %v152 = ashr i32 %v151, 7
+  %v153 = icmp slt i32 %v24, %v152
+  br i1 %v153, label %b3, label %b4, !prof !3
+
+b7:                                               ; preds = %b1
+  %v154 = add nsw i32 %v11, 127
+  %v155 = ashr i32 %v154, 7
+  %v156 = icmp slt i32 %v24, %v155
+  br i1 %v156, label %b9, label %b11, !prof !3
+
+b8:                                               ; preds = %b9, %b8
+  %v157 = phi i32 [ %v221, %b8 ], [ %v24, %b9 ]
+  %v158 = mul nsw i32 %v7, %v3
+  %v159 = add nsw i32 %v158, %v5
+  %v160 = add nsw i32 %v15, %v11
+  %v161 = add nsw i32 %v223, -1
+  %v162 = mul nsw i32 %v161, %v3
+  %v163 = mul nsw i32 %v223, %v3
+  %v164 = add nsw i32 %v223, 1
+  %v165 = mul nsw i32 %v164, %v3
+  %v166 = sub i32 %v160, %v159
+  %v167 = add i32 %v166, %v162
+  %v168 = add nsw i32 %v167, -129
+  %v169 = getelementptr inbounds i8, i8* %v1, i32 %v168
+  %v170 = bitcast i8* %v169 to <32 x i32>*
+  %v171 = load <32 x i32>, <32 x i32>* %v170, align 1, !tbaa !4
+  %v172 = add nsw i32 %v167, -128
+  %v173 = getelementptr inbounds i8, i8* %v1, i32 %v172
+  %v174 = bitcast i8* %v173 to <32 x i32>*
+  %v175 = load <32 x i32>, <32 x i32>* %v174, align 1, !tbaa !4
+  %v176 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v171, <32 x i32> %v175)
+  %v177 = add nsw i32 %v167, -127
+  %v178 = getelementptr inbounds i8, i8* %v1, i32 %v177
+  %v179 = bitcast i8* %v178 to <32 x i32>*
+  %v180 = load <32 x i32>, <32 x i32>* %v179, align 1, !tbaa !4
+  %v181 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v176, <32 x i32> %v180)
+  %v182 = add i32 %v166, %v163
+  %v183 = add nsw i32 %v182, -129
+  %v184 = getelementptr inbounds i8, i8* %v1, i32 %v183
+  %v185 = bitcast i8* %v184 to <32 x i32>*
+  %v186 = load <32 x i32>, <32 x i32>* %v185, align 1, !tbaa !4
+  %v187 = add nsw i32 %v182, -128
+  %v188 = getelementptr inbounds i8, i8* %v1, i32 %v187
+  %v189 = bitcast i8* %v188 to <32 x i32>*
+  %v190 = load <32 x i32>, <32 x i32>* %v189, align 1, !tbaa !4
+  %v191 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v186, <32 x i32> %v190)
+  %v192 = add nsw i32 %v182, -127
+  %v193 = getelementptr inbounds i8, i8* %v1, i32 %v192
+  %v194 = bitcast i8* %v193 to <32 x i32>*
+  %v195 = load <32 x i32>, <32 x i32>* %v194, align 1, !tbaa !4
+  %v196 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v191, <32 x i32> %v195)
+  %v197 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v181, <32 x i32> %v196)
+  %v198 = add i32 %v166, %v165
+  %v199 = add nsw i32 %v198, -129
+  %v200 = getelementptr inbounds i8, i8* %v1, i32 %v199
+  %v201 = bitcast i8* %v200 to <32 x i32>*
+  %v202 = load <32 x i32>, <32 x i32>* %v201, align 1, !tbaa !4
+  %v203 = add nsw i32 %v198, -128
+  %v204 = getelementptr inbounds i8, i8* %v1, i32 %v203
+  %v205 = bitcast i8* %v204 to <32 x i32>*
+  %v206 = load <32 x i32>, <32 x i32>* %v205, align 1, !tbaa !4
+  %v207 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v202, <32 x i32> %v206)
+  %v208 = add nsw i32 %v198, -127
+  %v209 = getelementptr inbounds i8, i8* %v1, i32 %v208
+  %v210 = bitcast i8* %v209 to <32 x i32>*
+  %v211 = load <32 x i32>, <32 x i32>* %v210, align 1, !tbaa !4
+  %v212 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v207, <32 x i32> %v211)
+  %v213 = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32> %v197, <32 x i32> %v212)
+  %v214 = mul nsw i32 %v223, %v13
+  %v215 = mul nsw i32 %v17, %v13
+  %v216 = add i32 %v11, -128
+  %v217 = sub i32 %v216, %v215
+  %v218 = add i32 %v217, %v214
+  %v219 = getelementptr inbounds i8, i8* %v9, i32 %v218
+  %v220 = bitcast i8* %v219 to <32 x i32>*
+  store <32 x i32> %v213, <32 x i32>* %v220, align 1, !tbaa !7
+  %v221 = add nuw nsw i32 %v157, 1
+  %v222 = icmp eq i32 %v221, %v155
+  br i1 %v222, label %b10, label %b8
+
+b9:                                               ; preds = %b10, %b7
+  %v223 = phi i32 [ %v164, %b10 ], [ %v17, %b7 ]
+  br label %b8
+
+b10:                                              ; preds = %b8
+  %v224 = icmp eq i32 %v164, %v20
+  br i1 %v224, label %b11, label %b9
+
+b11:                                              ; preds = %b10, %b7, %b4, %b0
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmaxub.128B(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind
+define i32 @f7(%4* noalias nocapture readonly %a0, %4* noalias nocapture readonly %a1) #0 {
+b0:
+  %v0 = tail call i32 @f6(%4* %a0, %4* %a1) #0
+  ret i32 0
+}
+
+; Function Attrs: nounwind
+define i32 @f8(i8** nocapture readonly %a0) #0 {
+b0:
+  %v0 = bitcast i8** %a0 to %4**
+  %v1 = load %4*, %4** %v0, align 4
+  %v2 = getelementptr i8*, i8** %a0, i32 1
+  %v3 = bitcast i8** %v2 to %4**
+  %v4 = load %4*, %4** %v3, align 4
+  %v5 = tail call i32 @f7(%4* %v1, %4* %v4)
+  ret i32 0
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
+attributes #1 = { nounwind readnone }
+
+!llvm.module.flags = !{!0, !1, !2}
+
+!0 = !{i32 2, !"halide_use_soft_float_abi", i32 0}
+!1 = !{i32 2, !"halide_mcpu", !"hexagonv60"}
+!2 = !{i32 2, !"halide_mattrs", !"+hvxv60,+hvx-length64b"}
+!3 = !{!"branch_weights", i32 1073741824, i32 0}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"input", !6}
+!6 = !{!"Halide buffer"}
+!7 = !{!8, !8, i64 0}
+!8 = !{!"dilate3x3", !6}

Added: llvm/trunk/test/CodeGen/Hexagon/retval-redundant-copy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/retval-redundant-copy.ll?rev=328561&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/retval-redundant-copy.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/retval-redundant-copy.ll Mon Mar 26 10:53:48 2018
@@ -0,0 +1,20 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+;
+; Check whether there are no redundant register copies of return values
+;
+; CHECK: memw(gp+#g0) = r0
+; CHECK: memw(gp+#g1) = r0
+
+@g0 = external global i32
+@g1 = external global i32
+
+define void @f0() {
+b0:
+  %v0 = tail call i32 @f1(i32 1, i32 2, i32 3)
+  store i32 %v0, i32* @g0, align 4
+  %v1 = tail call i32 @f1(i32 4, i32 5, i32 6)
+  store i32 %v1, i32* @g1, align 4
+  ret void
+}
+
+declare i32 @f1(i32, i32, i32)

Added: llvm/trunk/test/CodeGen/Hexagon/swp-bad-sched.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-bad-sched.ll?rev=328561&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-bad-sched.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-bad-sched.ll Mon Mar 26 10:53:48 2018
@@ -0,0 +1,147 @@
+; REQUIRES: asserts
+; RUN: llc -march=hexagon -enable-pipeliner -enable-aa-sched-mi < %s | FileCheck %s
+
+; CHECK: loop0(
+; CHECK: loop0(.LBB0_[[LOOP:.]],
+; CHECK: .LBB0_[[LOOP]]:
+; CHECK: or
+; CHECK: or
+; CHECK: }
+; CHECK: {
+; CHECK: }
+; CHECK: {
+; CHECK: memw
+; CHECK-NEXT: }{{[ \t]*}}:endloop0
+
+; Function Attrs: nounwind
+define void @f0([576 x i32]* nocapture %a0, i32 %a1, i32* nocapture %a2) #0 {
+b0:
+  %v0 = icmp sgt i32 %a1, 0
+  br i1 %v0, label %b1, label %b9
+
+b1:                                               ; preds = %b0
+  %v1 = icmp ugt i32 %a1, 3
+  %v2 = add i32 %a1, -3
+  br i1 %v1, label %b2, label %b5
+
+b2:                                               ; preds = %b1
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v3 = phi i32 [ %v48, %b3 ], [ 0, %b2 ]
+  %v4 = phi i32 [ %v46, %b3 ], [ 0, %b2 ]
+  %v5 = phi i32 [ %v49, %b3 ], [ 0, %b2 ]
+  %v6 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 0, i32 %v5
+  %v7 = load i32, i32* %v6, align 4, !tbaa !0
+  %v8 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 1, i32 %v5
+  %v9 = load i32, i32* %v8, align 4, !tbaa !0
+  %v10 = add nsw i32 %v9, %v7
+  store i32 %v10, i32* %v6, align 4, !tbaa !0
+  %v11 = sub nsw i32 %v7, %v9
+  store i32 %v11, i32* %v8, align 4, !tbaa !0
+  %v12 = tail call i32 @llvm.hexagon.A2.abs(i32 %v10)
+  %v13 = or i32 %v12, %v4
+  %v14 = tail call i32 @llvm.hexagon.A2.abs(i32 %v11)
+  %v15 = or i32 %v14, %v3
+  %v16 = add nsw i32 %v5, 1
+  %v17 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 0, i32 %v16
+  %v18 = load i32, i32* %v17, align 4, !tbaa !0
+  %v19 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 1, i32 %v16
+  %v20 = load i32, i32* %v19, align 4, !tbaa !0
+  %v21 = add nsw i32 %v20, %v18
+  store i32 %v21, i32* %v17, align 4, !tbaa !0
+  %v22 = sub nsw i32 %v18, %v20
+  store i32 %v22, i32* %v19, align 4, !tbaa !0
+  %v23 = tail call i32 @llvm.hexagon.A2.abs(i32 %v21)
+  %v24 = or i32 %v23, %v13
+  %v25 = tail call i32 @llvm.hexagon.A2.abs(i32 %v22)
+  %v26 = or i32 %v25, %v15
+  %v27 = add nsw i32 %v5, 2
+  %v28 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 0, i32 %v27
+  %v29 = load i32, i32* %v28, align 4, !tbaa !0
+  %v30 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 1, i32 %v27
+  %v31 = load i32, i32* %v30, align 4, !tbaa !0
+  %v32 = add nsw i32 %v31, %v29
+  store i32 %v32, i32* %v28, align 4, !tbaa !0
+  %v33 = sub nsw i32 %v29, %v31
+  store i32 %v33, i32* %v30, align 4, !tbaa !0
+  %v34 = tail call i32 @llvm.hexagon.A2.abs(i32 %v32)
+  %v35 = or i32 %v34, %v24
+  %v36 = tail call i32 @llvm.hexagon.A2.abs(i32 %v33)
+  %v37 = or i32 %v36, %v26
+  %v38 = add nsw i32 %v5, 3
+  %v39 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 0, i32 %v38
+  %v40 = load i32, i32* %v39, align 4, !tbaa !0
+  %v41 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 1, i32 %v38
+  %v42 = load i32, i32* %v41, align 4, !tbaa !0
+  %v43 = add nsw i32 %v42, %v40
+  store i32 %v43, i32* %v39, align 4, !tbaa !0
+  %v44 = sub nsw i32 %v40, %v42
+  store i32 %v44, i32* %v41, align 4, !tbaa !0
+  %v45 = tail call i32 @llvm.hexagon.A2.abs(i32 %v43)
+  %v46 = or i32 %v45, %v35
+  %v47 = tail call i32 @llvm.hexagon.A2.abs(i32 %v44)
+  %v48 = or i32 %v47, %v37
+  %v49 = add nsw i32 %v5, 4
+  %v50 = icmp slt i32 %v49, %v2
+  br i1 %v50, label %b3, label %b4
+
+b4:                                               ; preds = %b3
+  br label %b5
+
+b5:                                               ; preds = %b4, %b1
+  %v51 = phi i32 [ 0, %b1 ], [ %v49, %b4 ]
+  %v52 = phi i32 [ 0, %b1 ], [ %v48, %b4 ]
+  %v53 = phi i32 [ 0, %b1 ], [ %v46, %b4 ]
+  %v54 = icmp eq i32 %v51, %a1
+  br i1 %v54, label %b9, label %b6
+
+b6:                                               ; preds = %b5
+  br label %b7
+
+b7:                                               ; preds = %b7, %b6
+  %v55 = phi i32 [ %v67, %b7 ], [ %v52, %b6 ]
+  %v56 = phi i32 [ %v65, %b7 ], [ %v53, %b6 ]
+  %v57 = phi i32 [ %v68, %b7 ], [ %v51, %b6 ]
+  %v58 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 0, i32 %v57
+  %v59 = load i32, i32* %v58, align 4, !tbaa !0
+  %v60 = getelementptr inbounds [576 x i32], [576 x i32]* %a0, i32 1, i32 %v57
+  %v61 = load i32, i32* %v60, align 4, !tbaa !0
+  %v62 = add nsw i32 %v61, %v59
+  store i32 %v62, i32* %v58, align 4, !tbaa !0
+  %v63 = sub nsw i32 %v59, %v61
+  store i32 %v63, i32* %v60, align 4, !tbaa !0
+  %v64 = tail call i32 @llvm.hexagon.A2.abs(i32 %v62)
+  %v65 = or i32 %v64, %v56
+  %v66 = tail call i32 @llvm.hexagon.A2.abs(i32 %v63)
+  %v67 = or i32 %v66, %v55
+  %v68 = add nsw i32 %v57, 1
+  %v69 = icmp eq i32 %v68, %a1
+  br i1 %v69, label %b8, label %b7
+
+b8:                                               ; preds = %b7
+  br label %b9
+
+b9:                                               ; preds = %b8, %b5, %b0
+  %v70 = phi i32 [ 0, %b0 ], [ %v52, %b5 ], [ %v67, %b8 ]
+  %v71 = phi i32 [ 0, %b0 ], [ %v53, %b5 ], [ %v65, %b8 ]
+  %v72 = load i32, i32* %a2, align 4, !tbaa !0
+  %v73 = or i32 %v72, %v71
+  store i32 %v73, i32* %a2, align 4, !tbaa !0
+  %v74 = getelementptr inbounds i32, i32* %a2, i32 1
+  %v75 = load i32, i32* %v74, align 4, !tbaa !0
+  %v76 = or i32 %v75, %v70
+  store i32 %v76, i32* %v74, align 4, !tbaa !0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.abs(i32) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-const-tc1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-const-tc1.ll?rev=328561&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-const-tc1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-const-tc1.ll Mon Mar 26 10:53:48 2018
@@ -0,0 +1,70 @@
+; RUN: llc -march=hexagon -enable-pipeliner -enable-pipeliner-opt-size \
+; RUN:     -verify-machineinstrs \
+; RUN:     -enable-aa-sched-mi=false -hexagon-expand-condsets=0 \
+; RUN:     < %s | FileCheck %s
+
+; Disable expand-condsets because it will assert on undefined registers.
+
+; Test that we change the CFG correctly for pipelined loops where the trip
+; count is a compile-time constant, and the trip count is the same as the
+; number of prolog blocks (i.e., stages).
+
+; CHECK: memb(r{{[0-9]+}}+#0) =
+; CHECK: memb(r{{[0-9]+}}+#0) =
+
+; Function Attrs: nounwind optsize
+define void @f0() #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b5, %b0
+  %v0 = load i16, i16* undef, align 2, !tbaa !0
+  %v1 = sext i16 %v0 to i32
+  %v2 = load i16, i16* undef, align 2, !tbaa !0
+  %v3 = sext i16 %v2 to i32
+  %v4 = and i32 %v1, 7
+  %v5 = and i32 %v3, 7
+  br label %b2
+
+b2:                                               ; preds = %b4, %b1
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v6 = phi i32 [ 0, %b2 ], [ %v22, %b3 ]
+  %v7 = add i32 %v6, undef
+  %v8 = icmp slt i32 undef, %v7
+  %v9 = add nsw i32 %v7, 1
+  %v10 = select i1 undef, i32 undef, i32 %v9
+  %v11 = add i32 %v10, 0
+  %v12 = getelementptr inbounds i8, i8* null, i32 %v11
+  %v13 = load i8, i8* %v12, align 1, !tbaa !4
+  %v14 = zext i8 %v13 to i32
+  %v15 = mul i32 %v14, %v4
+  %v16 = add i32 %v15, 0
+  %v17 = mul i32 %v16, %v5
+  %v18 = add i32 %v17, 32
+  %v19 = add i32 %v18, 0
+  %v20 = lshr i32 %v19, 6
+  %v21 = trunc i32 %v20 to i8
+  store i8 %v21, i8* undef, align 1, !tbaa !4
+  %v22 = add i32 %v6, 1
+  %v23 = icmp eq i32 %v22, 2
+  br i1 %v23, label %b4, label %b3
+
+b4:                                               ; preds = %b3
+  br i1 undef, label %b5, label %b2
+
+b5:                                               ; preds = %b4
+  br i1 undef, label %b1, label %b6
+
+b6:                                               ; preds = %b5
+  ret void
+}
+
+attributes #0 = { nounwind optsize "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!2, !2, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi7.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi7.ll?rev=328561&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi7.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi7.ll Mon Mar 26 10:53:48 2018
@@ -0,0 +1,275 @@
+; RUN: llc -march=hexagon -O2 -enable-pipeliner -disable-block-placement=0 < %s  | FileCheck %s
+
+; For the Phis generated in the epilog, test that we generate the correct
+; names for the values coming from the prolog stages. The test below
+; checks that the value loaded in the first prolog block gets propagated
+; through the first epilog to the use after the loop.
+
+; CHECK: if ({{.*}}) jump
+; CHECK: [[VREG:v([0-9]+)]]{{.*}} = {{.*}}vmem(r{{[0-9]+}}++#1)
+; CHECK: if ({{.*}}) {{jump|jump:nt}} [[EPLOG1:(.*)]]
+; CHECK: if ({{.*}}) {{jump|jump:nt}} [[EPLOG:(.*)]]
+; CHECK: [[EPLOG]]:
+; CHECK: [[VREG1:v([0-9]+)]] = [[VREG]]
+; CHECK: [[VREG]] = v{{[0-9]+}}
+; CHECK: [[EPLOG1]]:
+; CHECK: = vlalign([[VREG]],[[VREG1]],#1)
+
+; Function Attrs: nounwind
+define void @f0(i8* noalias nocapture readonly %a0, i32 %a1, i32 %a2, i8* noalias nocapture readonly %a3, i32 %a4, i8* noalias nocapture %a5, i32 %a6) #0 {
+b0:
+  %v0 = sub i32 0, %a1
+  %v1 = getelementptr inbounds i8, i8* %a0, i32 %v0
+  %v2 = bitcast i8* %v1 to <16 x i32>*
+  %v3 = bitcast i8* %a0 to <16 x i32>*
+  %v4 = getelementptr inbounds i8, i8* %a0, i32 %a1
+  %v5 = bitcast i8* %v4 to <16 x i32>*
+  %v6 = mul nsw i32 %a1, 2
+  %v7 = getelementptr inbounds i8, i8* %a0, i32 %v6
+  %v8 = bitcast i8* %v7 to <16 x i32>*
+  %v9 = bitcast i8* %a5 to <16 x i32>*
+  %v10 = getelementptr inbounds i8, i8* %a5, i32 %a6
+  %v11 = bitcast i8* %v10 to <16 x i32>*
+  %v12 = tail call <16 x i32> @llvm.hexagon.V6.vd0()
+  %v13 = load <16 x i32>, <16 x i32>* %v2, align 64, !tbaa !0
+  %v14 = load <16 x i32>, <16 x i32>* %v3, align 64, !tbaa !0
+  %v15 = load <16 x i32>, <16 x i32>* %v5, align 64, !tbaa !0
+  %v16 = load <16 x i32>, <16 x i32>* %v8, align 64, !tbaa !0
+  %v17 = load i8, i8* %a3, align 1, !tbaa !0
+  %v18 = getelementptr inbounds i8, i8* %a3, i32 1
+  %v19 = load i8, i8* %v18, align 1, !tbaa !0
+  %v20 = zext i8 %v19 to i64
+  %v21 = shl nuw nsw i64 %v20, 24
+  %v22 = zext i8 %v17 to i64
+  %v23 = shl nuw nsw i64 %v22, 16
+  %v24 = shl nuw nsw i64 %v20, 8
+  %v25 = or i64 %v22, %v23
+  %v26 = or i64 %v21, %v25
+  %v27 = or i64 %v24, %v26
+  %v28 = trunc i64 %v27 to i32
+  %v29 = getelementptr inbounds i8, i8* %a3, i32 3
+  %v30 = load i8, i8* %v29, align 1, !tbaa !0
+  %v31 = getelementptr inbounds i8, i8* %a3, i32 4
+  %v32 = load i8, i8* %v31, align 1, !tbaa !0
+  %v33 = zext i8 %v32 to i64
+  %v34 = shl nuw nsw i64 %v33, 24
+  %v35 = zext i8 %v30 to i64
+  %v36 = shl nuw nsw i64 %v35, 16
+  %v37 = shl nuw nsw i64 %v33, 8
+  %v38 = or i64 %v35, %v36
+  %v39 = or i64 %v34, %v38
+  %v40 = or i64 %v37, %v39
+  %v41 = trunc i64 %v40 to i32
+  %v42 = getelementptr inbounds i8, i8* %a3, i32 6
+  %v43 = load i8, i8* %v42, align 1, !tbaa !0
+  %v44 = getelementptr inbounds i8, i8* %a3, i32 7
+  %v45 = load i8, i8* %v44, align 1, !tbaa !0
+  %v46 = zext i8 %v45 to i64
+  %v47 = shl nuw nsw i64 %v46, 24
+  %v48 = zext i8 %v43 to i64
+  %v49 = shl nuw nsw i64 %v48, 16
+  %v50 = shl nuw nsw i64 %v46, 8
+  %v51 = or i64 %v48, %v49
+  %v52 = or i64 %v47, %v51
+  %v53 = or i64 %v50, %v52
+  %v54 = trunc i64 %v53 to i32
+  %v55 = getelementptr inbounds i8, i8* %a3, i32 5
+  %v56 = load i8, i8* %v55, align 1, !tbaa !0
+  %v57 = getelementptr inbounds i8, i8* %a3, i32 2
+  %v58 = load i8, i8* %v57, align 1, !tbaa !0
+  %v59 = zext i8 %v58 to i64
+  %v60 = shl nuw nsw i64 %v59, 24
+  %v61 = zext i8 %v56 to i64
+  %v62 = shl nuw nsw i64 %v61, 16
+  %v63 = shl nuw nsw i64 %v59, 8
+  %v64 = or i64 %v61, %v62
+  %v65 = or i64 %v60, %v64
+  %v66 = or i64 %v63, %v65
+  %v67 = trunc i64 %v66 to i32
+  %v68 = getelementptr inbounds i8, i8* %a3, i32 8
+  %v69 = load i8, i8* %v68, align 1, !tbaa !0
+  %v70 = zext i8 %v69 to i64
+  %v71 = shl nuw nsw i64 %v70, 24
+  %v72 = shl nuw nsw i64 %v70, 16
+  %v73 = shl nuw nsw i64 %v70, 8
+  %v74 = or i64 %v70, %v72
+  %v75 = or i64 %v71, %v74
+  %v76 = or i64 %v73, %v75
+  %v77 = trunc i64 %v76 to i32
+  %v78 = icmp sgt i32 %a2, 64
+  br i1 %v78, label %b1, label %b4
+
+b1:                                               ; preds = %b0
+  %v79 = add i32 %v6, 64
+  %v80 = getelementptr inbounds i8, i8* %a0, i32 %v79
+  %v81 = bitcast i8* %v80 to <16 x i32>*
+  %v82 = add i32 %a1, 64
+  %v83 = getelementptr inbounds i8, i8* %a0, i32 %v82
+  %v84 = bitcast i8* %v83 to <16 x i32>*
+  %v85 = getelementptr inbounds i8, i8* %a0, i32 64
+  %v86 = bitcast i8* %v85 to <16 x i32>*
+  %v87 = sub i32 64, %a1
+  %v88 = getelementptr inbounds i8, i8* %a0, i32 %v87
+  %v89 = bitcast i8* %v88 to <16 x i32>*
+  %v90 = add i32 %a2, -65
+  %v91 = lshr i32 %v90, 6
+  %v92 = mul i32 %v91, 64
+  %v93 = add i32 %v92, %a6
+  %v94 = add i32 %v93, 64
+  %v95 = getelementptr i8, i8* %a5, i32 %v94
+  %v96 = add i32 %v92, 64
+  %v97 = getelementptr i8, i8* %a5, i32 %v96
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v98 = phi i32 [ %a2, %b1 ], [ %v153, %b2 ]
+  %v99 = phi <16 x i32> [ %v12, %b1 ], [ %v100, %b2 ]
+  %v100 = phi <16 x i32> [ %v13, %b1 ], [ %v118, %b2 ]
+  %v101 = phi <16 x i32> [ %v12, %b1 ], [ %v102, %b2 ]
+  %v102 = phi <16 x i32> [ %v14, %b1 ], [ %v120, %b2 ]
+  %v103 = phi <16 x i32> [ %v12, %b1 ], [ %v104, %b2 ]
+  %v104 = phi <16 x i32> [ %v15, %b1 ], [ %v122, %b2 ]
+  %v105 = phi <16 x i32> [ %v12, %b1 ], [ %v106, %b2 ]
+  %v106 = phi <16 x i32> [ %v16, %b1 ], [ %v124, %b2 ]
+  %v107 = phi <16 x i32>* [ %v89, %b1 ], [ %v117, %b2 ]
+  %v108 = phi <16 x i32>* [ %v86, %b1 ], [ %v119, %b2 ]
+  %v109 = phi <16 x i32>* [ %v84, %b1 ], [ %v121, %b2 ]
+  %v110 = phi <16 x i32>* [ %v81, %b1 ], [ %v123, %b2 ]
+  %v111 = phi <16 x i32>* [ %v9, %b1 ], [ %v148, %b2 ]
+  %v112 = phi <16 x i32>* [ %v11, %b1 ], [ %v152, %b2 ]
+  %v113 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v100, <16 x i32> %v99, i32 1)
+  %v114 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v102, <16 x i32> %v101, i32 1)
+  %v115 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v104, <16 x i32> %v103, i32 1)
+  %v116 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v106, <16 x i32> %v105, i32 1)
+  %v117 = getelementptr inbounds <16 x i32>, <16 x i32>* %v107, i32 1
+  %v118 = load <16 x i32>, <16 x i32>* %v107, align 64, !tbaa !0
+  %v119 = getelementptr inbounds <16 x i32>, <16 x i32>* %v108, i32 1
+  %v120 = load <16 x i32>, <16 x i32>* %v108, align 64, !tbaa !0
+  %v121 = getelementptr inbounds <16 x i32>, <16 x i32>* %v109, i32 1
+  %v122 = load <16 x i32>, <16 x i32>* %v109, align 64, !tbaa !0
+  %v123 = getelementptr inbounds <16 x i32>, <16 x i32>* %v110, i32 1
+  %v124 = load <16 x i32>, <16 x i32>* %v110, align 64, !tbaa !0
+  %v125 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v118, <16 x i32> %v100, i32 1)
+  %v126 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v120, <16 x i32> %v102, i32 1)
+  %v127 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v122, <16 x i32> %v104, i32 1)
+  %v128 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v124, <16 x i32> %v106, i32 1)
+  %v129 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v125, <16 x i32> %v113)
+  %v130 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v126, <16 x i32> %v114)
+  %v131 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32> %v129, i32 %v28)
+  %v132 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32> %v130, i32 %v28)
+  %v133 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v127, <16 x i32> %v115)
+  %v134 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v131, <32 x i32> %v130, i32 %v41)
+  %v135 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v132, <32 x i32> %v133, i32 %v41)
+  %v136 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v125, <16 x i32> %v126)
+  %v137 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v126, <16 x i32> %v127)
+  %v138 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v134, <32 x i32> %v136, i32 %v67)
+  %v139 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v135, <32 x i32> %v137, i32 %v67)
+  %v140 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v128, <16 x i32> %v116)
+  %v141 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v138, <32 x i32> %v133, i32 %v54)
+  %v142 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v139, <32 x i32> %v140, i32 %v54)
+  %v143 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v141, <16 x i32> %v127, i32 %v77)
+  %v144 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v142, <16 x i32> %v128, i32 %v77)
+  %v145 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v143)
+  %v146 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v143)
+  %v147 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v145, <16 x i32> %v146, i32 %a4)
+  %v148 = getelementptr inbounds <16 x i32>, <16 x i32>* %v111, i32 1
+  store <16 x i32> %v147, <16 x i32>* %v111, align 64, !tbaa !0
+  %v149 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v144)
+  %v150 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v144)
+  %v151 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v149, <16 x i32> %v150, i32 %a4)
+  %v152 = getelementptr inbounds <16 x i32>, <16 x i32>* %v112, i32 1
+  store <16 x i32> %v151, <16 x i32>* %v112, align 64, !tbaa !0
+  %v153 = add nsw i32 %v98, -64
+  %v154 = icmp sgt i32 %v153, 64
+  br i1 %v154, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  %v155 = bitcast i8* %v95 to <16 x i32>*
+  %v156 = bitcast i8* %v97 to <16 x i32>*
+  br label %b4
+
+b4:                                               ; preds = %b3, %b0
+  %v157 = phi <16 x i32> [ %v100, %b3 ], [ %v12, %b0 ]
+  %v158 = phi <16 x i32> [ %v118, %b3 ], [ %v13, %b0 ]
+  %v159 = phi <16 x i32> [ %v102, %b3 ], [ %v12, %b0 ]
+  %v160 = phi <16 x i32> [ %v120, %b3 ], [ %v14, %b0 ]
+  %v161 = phi <16 x i32> [ %v104, %b3 ], [ %v12, %b0 ]
+  %v162 = phi <16 x i32> [ %v122, %b3 ], [ %v15, %b0 ]
+  %v163 = phi <16 x i32> [ %v106, %b3 ], [ %v12, %b0 ]
+  %v164 = phi <16 x i32> [ %v124, %b3 ], [ %v16, %b0 ]
+  %v165 = phi <16 x i32>* [ %v156, %b3 ], [ %v9, %b0 ]
+  %v166 = phi <16 x i32>* [ %v155, %b3 ], [ %v11, %b0 ]
+  %v167 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v158, <16 x i32> %v157, i32 1)
+  %v168 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v160, <16 x i32> %v159, i32 1)
+  %v169 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v162, <16 x i32> %v161, i32 1)
+  %v170 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v164, <16 x i32> %v163, i32 1)
+  %v171 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v158, <16 x i32> %v158, i32 1)
+  %v172 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v160, <16 x i32> %v160, i32 1)
+  %v173 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v162, <16 x i32> %v162, i32 1)
+  %v174 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v164, <16 x i32> %v164, i32 1)
+  %v175 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v171, <16 x i32> %v167)
+  %v176 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v172, <16 x i32> %v168)
+  %v177 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32> %v175, i32 %v28)
+  %v178 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32> %v176, i32 %v28)
+  %v179 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v173, <16 x i32> %v169)
+  %v180 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v177, <32 x i32> %v176, i32 %v41)
+  %v181 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v178, <32 x i32> %v179, i32 %v41)
+  %v182 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v171, <16 x i32> %v172)
+  %v183 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v172, <16 x i32> %v173)
+  %v184 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v180, <32 x i32> %v182, i32 %v67)
+  %v185 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v181, <32 x i32> %v183, i32 %v67)
+  %v186 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v174, <16 x i32> %v170)
+  %v187 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v184, <32 x i32> %v179, i32 %v54)
+  %v188 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v185, <32 x i32> %v186, i32 %v54)
+  %v189 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v187, <16 x i32> %v173, i32 %v77)
+  %v190 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v188, <16 x i32> %v174, i32 %v77)
+  %v191 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v189)
+  %v192 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v189)
+  %v193 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v191, <16 x i32> %v192, i32 %a4)
+  store <16 x i32> %v193, <16 x i32>* %v165, align 64, !tbaa !0
+  %v194 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v190)
+  %v195 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v190)
+  %v196 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v194, <16 x i32> %v195, i32 %a4)
+  store <16 x i32> %v196, <16 x i32>* %v166, align 64, !tbaa !0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vd0() #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-memrefs-epilog1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-memrefs-epilog1.ll?rev=328561&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-memrefs-epilog1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-memrefs-epilog1.ll Mon Mar 26 10:53:48 2018
@@ -0,0 +1,90 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s | FileCheck %s
+
+; Test that a store and load, that alias, are not put in the same packet. The
+; pipeliner altered the size of the memrefs for these instructions, which
+; resulted in no order dependence between the instructions in the DAG. No order
+; dependence was added since the size was set to UINT_MAX, but there is a
+; computation using the size that overflowed.
+
+; CHECK: endloop0
+; CHECK: memh([[REG:r([0-9]+)]]+#0) =
+; CHECK: = memh([[REG]]++#2)
+
+; Function Attrs: nounwind
+define signext i16 @f0(i16* nocapture readonly %a0, i16* nocapture readonly %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = alloca [40 x i16], align 8
+  %v1 = bitcast [40 x i16]* %v0 to i8*
+  call void @llvm.lifetime.start.p0i8(i64 80, i8* nonnull %v1) #2
+  %v2 = getelementptr inbounds [40 x i16], [40 x i16]* %v0, i32 0, i32 0
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v3 = phi i16* [ %a1, %b0 ], [ %v24, %b1 ]
+  %v4 = phi i16* [ %v2, %b0 ], [ %v25, %b1 ]
+  %v5 = phi i32 [ 0, %b0 ], [ %v14, %b1 ]
+  %v6 = phi i32 [ 1, %b0 ], [ %v22, %b1 ]
+  %v7 = phi i32 [ 0, %b0 ], [ %v23, %b1 ]
+  %v8 = load i16, i16* %v3, align 2
+  %v9 = sext i16 %v8 to i32
+  %v10 = tail call i32 @llvm.hexagon.A2.aslh(i32 %v9)
+  %v11 = tail call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v10, i32 1)
+  %v12 = tail call i32 @llvm.hexagon.A2.asrh(i32 %v11)
+  %v13 = trunc i32 %v12 to i16
+  store i16 %v13, i16* %v4, align 2
+  %v14 = add nuw nsw i32 %v5, 1
+  %v15 = icmp eq i32 %v14, 40
+  %v16 = getelementptr inbounds i16, i16* %a0, i32 %v7
+  %v17 = load i16, i16* %v16, align 2
+  %v18 = sext i16 %v17 to i32
+  %v19 = getelementptr inbounds [40 x i16], [40 x i16]* %v0, i32 0, i32 %v7
+  %v20 = load i16, i16* %v19, align 2
+  %v21 = sext i16 %v20 to i32
+  %v22 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v6, i32 %v18, i32 %v21)
+  %v23 = add nuw nsw i32 %v7, 1
+  %v24 = getelementptr i16, i16* %v3, i32 1
+  %v25 = getelementptr i16, i16* %v4, i32 1
+  br i1 %v15, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  %v26 = tail call signext i16 @f1(i32 %v22) #0
+  %v27 = sext i16 %v26 to i32
+  %v28 = tail call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %v22, i32 %v27)
+  %v29 = tail call i32 @llvm.hexagon.A2.asrh(i32 %v28)
+  %v30 = shl i32 %v29, 16
+  %v31 = ashr exact i32 %v30, 16
+  %v32 = icmp slt i32 %v30, 65536
+  br label %b3
+
+b3:                                               ; preds = %b2
+  call void @llvm.lifetime.end.p0i8(i64 80, i8* nonnull %v1) #2
+  ret i16 0
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) #2
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.aslh(i32) #2
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.asrh(i32) #2
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32, i32, i32) #2
+
+; Function Attrs: nounwind
+declare signext i16 @f1(i32) local_unnamed_addr #0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.asl.r.r.sat(i32, i32) #2
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { argmemonly nounwind }
+attributes #2 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-phi-ch-offset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-phi-ch-offset.ll?rev=328561&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-phi-ch-offset.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-phi-ch-offset.ll Mon Mar 26 10:53:48 2018
@@ -0,0 +1,66 @@
+; RUN: llc -march=hexagon -enable-pipeliner -pipeliner-max-stages=2 < %s | FileCheck %s
+
+; Test that we generate the correct offsets after we removed unneeded
+; chain dependences between Phis and generated a better pipeline.
+
+; CHECK: loop0(.LBB0_[[LOOP:.]],
+; CHECK: .LBB0_[[LOOP]]:
+; CHECK: = memd([[REG0:(r[0-9]+)]]+#8)
+; CHECK: memd([[REG0]]++#8) =
+; CHECK: }{{[ \t]*}}:endloop0
+
+ at g0 = common global [400 x i8] zeroinitializer, align 8
+ at g1 = common global [400 x i8] zeroinitializer, align 8
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br label %b2
+
+b1:                                               ; preds = %b2
+  ret void
+
+b2:                                               ; preds = %b2, %b0
+  %v0 = phi i8* [ getelementptr inbounds ([400 x i8], [400 x i8]* @g0, i32 0, i32 0), %b0 ], [ %v23, %b2 ]
+  %v1 = phi i8* [ getelementptr inbounds ([400 x i8], [400 x i8]* @g1, i32 0, i32 0), %b0 ], [ %v24, %b2 ]
+  %v2 = phi i32 [ 0, %b0 ], [ %v21, %b2 ]
+  %v3 = bitcast i8* %v0 to <8 x i8>*
+  %v4 = load <8 x i8>, <8 x i8>* %v3, align 8
+  %v5 = bitcast i8* %v1 to <8 x i8>*
+  %v6 = load <8 x i8>, <8 x i8>* %v5, align 8
+  %v7 = bitcast <8 x i8> %v4 to <2 x i32>
+  %v8 = extractelement <2 x i32> %v7, i32 0
+  %v9 = extractelement <2 x i32> %v7, i32 1
+  %v10 = bitcast <8 x i8> %v6 to <2 x i32>
+  %v11 = extractelement <2 x i32> %v10, i32 0
+  %v12 = extractelement <2 x i32> %v10, i32 1
+  %v13 = tail call i64 @llvm.hexagon.S2.vzxtbh(i32 %v11)
+  %v14 = tail call i64 @llvm.hexagon.S2.vzxtbh(i32 %v12)
+  %v15 = tail call i64 @llvm.hexagon.M5.vmacbsu(i64 %v13, i32 %v8, i32 117901063)
+  %v16 = tail call i64 @llvm.hexagon.M5.vmacbsu(i64 %v14, i32 %v9, i32 117901063)
+  %v17 = tail call i32 @llvm.hexagon.S2.vtrunehb(i64 %v15)
+  %v18 = tail call i32 @llvm.hexagon.S2.vtrunehb(i64 %v16)
+  %v19 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v18, i32 %v17)
+  %v20 = bitcast i64 %v19 to <8 x i8>
+  store <8 x i8> %v20, <8 x i8>* %v5, align 8
+  %v21 = add nsw i32 %v2, 8
+  %v22 = icmp slt i32 %v2, 392
+  %v23 = getelementptr i8, i8* %v0, i32 8
+  %v24 = getelementptr i8, i8* %v1, i32 8
+  br i1 %v22, label %b2, label %b1
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.vzxtbh(i32) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M5.vmacbsu(i64, i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.vtrunehb(i64) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.A2.combinew(i32, i32) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-resmii-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-resmii-1.ll?rev=328561&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-resmii-1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-resmii-1.ll Mon Mar 26 10:53:48 2018
@@ -0,0 +1,114 @@
+; RUN: llc -march=hexagon -enable-pipeliner -debug-only=pipeliner < %s -o - 2>&1 > /dev/null | FileCheck %s
+; REQUIRES: asserts
+
+; Test that checks that we compute the correct ResMII for haar.
+
+; CHECK: MII = 4 (rec=1, res=4)
+
+; Function Attrs: nounwind
+define void @f0(i16* noalias nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i8* noalias nocapture %a4, i32 %a5) #0 {
+b0:
+  %v0 = ashr i32 %a3, 2
+  %v1 = ashr i32 %a3, 1
+  %v2 = add i32 %v1, %v0
+  %v3 = icmp sgt i32 %a2, 0
+  br i1 %v3, label %b1, label %b8
+
+b1:                                               ; preds = %b0
+  %v4 = sdiv i32 %a1, 64
+  %v5 = icmp sgt i32 %a1, 63
+  br label %b2
+
+b2:                                               ; preds = %b6, %b1
+  %v6 = phi i32 [ 0, %b1 ], [ %v56, %b6 ]
+  %v7 = ashr exact i32 %v6, 1
+  %v8 = mul nsw i32 %v7, %a3
+  br i1 %v5, label %b3, label %b6
+
+b3:                                               ; preds = %b2
+  %v9 = add nsw i32 %v6, 1
+  %v10 = mul nsw i32 %v9, %a5
+  %v11 = mul nsw i32 %v6, %a5
+  %v12 = add i32 %v2, %v8
+  %v13 = add i32 %v8, %v0
+  %v14 = add i32 %v8, %v1
+  %v15 = getelementptr inbounds i8, i8* %a4, i32 %v10
+  %v16 = getelementptr inbounds i8, i8* %a4, i32 %v11
+  %v17 = getelementptr inbounds i16, i16* %a0, i32 %v12
+  %v18 = getelementptr inbounds i16, i16* %a0, i32 %v13
+  %v19 = getelementptr inbounds i16, i16* %a0, i32 %v14
+  %v20 = getelementptr inbounds i16, i16* %a0, i32 %v8
+  %v21 = bitcast i8* %v15 to <16 x i32>*
+  %v22 = bitcast i8* %v16 to <16 x i32>*
+  %v23 = bitcast i16* %v17 to <16 x i32>*
+  %v24 = bitcast i16* %v18 to <16 x i32>*
+  %v25 = bitcast i16* %v19 to <16 x i32>*
+  %v26 = bitcast i16* %v20 to <16 x i32>*
+  br label %b4
+
+b4:                                               ; preds = %b4, %b3
+  %v27 = phi i32 [ 0, %b3 ], [ %v54, %b4 ]
+  %v28 = phi <16 x i32>* [ %v26, %b3 ], [ %v34, %b4 ]
+  %v29 = phi <16 x i32>* [ %v25, %b3 ], [ %v36, %b4 ]
+  %v30 = phi <16 x i32>* [ %v24, %b3 ], [ %v38, %b4 ]
+  %v31 = phi <16 x i32>* [ %v23, %b3 ], [ %v40, %b4 ]
+  %v32 = phi <16 x i32>* [ %v21, %b3 ], [ %v53, %b4 ]
+  %v33 = phi <16 x i32>* [ %v22, %b3 ], [ %v52, %b4 ]
+  %v34 = getelementptr inbounds <16 x i32>, <16 x i32>* %v28, i32 1
+  %v35 = load <16 x i32>, <16 x i32>* %v28, align 64
+  %v36 = getelementptr inbounds <16 x i32>, <16 x i32>* %v29, i32 1
+  %v37 = load <16 x i32>, <16 x i32>* %v29, align 64
+  %v38 = getelementptr inbounds <16 x i32>, <16 x i32>* %v30, i32 1
+  %v39 = load <16 x i32>, <16 x i32>* %v30, align 64
+  %v40 = getelementptr inbounds <16 x i32>, <16 x i32>* %v31, i32 1
+  %v41 = load <16 x i32>, <16 x i32>* %v31, align 64
+  %v42 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v35, <16 x i32> %v37)
+  %v43 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v35, <16 x i32> %v37)
+  %v44 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v39, <16 x i32> %v41)
+  %v45 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v39, <16 x i32> %v41)
+  %v46 = tail call <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32> %v42, <16 x i32> %v44)
+  %v47 = tail call <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32> %v42, <16 x i32> %v44)
+  %v48 = tail call <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32> %v43, <16 x i32> %v45)
+  %v49 = tail call <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32> %v43, <16 x i32> %v45)
+  %v50 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v47, <16 x i32> %v46)
+  %v51 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v49, <16 x i32> %v48)
+  %v52 = getelementptr inbounds <16 x i32>, <16 x i32>* %v33, i32 1
+  store <16 x i32> %v50, <16 x i32>* %v33, align 64
+  %v53 = getelementptr inbounds <16 x i32>, <16 x i32>* %v32, i32 1
+  store <16 x i32> %v51, <16 x i32>* %v32, align 64
+  %v54 = add nsw i32 %v27, 1
+  %v55 = icmp slt i32 %v54, %v4
+  br i1 %v55, label %b4, label %b5
+
+b5:                                               ; preds = %b4
+  br label %b6
+
+b6:                                               ; preds = %b5, %b2
+  %v56 = add nsw i32 %v6, 2
+  %v57 = icmp slt i32 %v56, %a2
+  br i1 %v57, label %b2, label %b7
+
+b7:                                               ; preds = %b6
+  br label %b8
+
+b8:                                               ; preds = %b7, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/vdotprod.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vdotprod.ll?rev=328561&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vdotprod.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vdotprod.ll Mon Mar 26 10:53:48 2018
@@ -0,0 +1,51 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that we generate a single packet for the vectorized dot product loop.
+
+; CHECK: loop0(.LBB0_[[LOOP:.]],
+; CHECK: .LBB0_[[LOOP]]:
+; CHECK: {
+; CHECK: mpyi
+; CHECK: mpyi
+; CHECK: memd
+; CHECK: memd
+; CHECK-NOT: {
+; CHECK: }{{[ \t]*}}:endloop
+
+; Function Attrs: nounwind readonly
+define i32 @f0(i32* nocapture readonly %a0, i32* nocapture readonly %a1) #0 {
+b0:
+  %v0 = bitcast i32* %a0 to i64*
+  %v1 = bitcast i32* %a1 to i64*
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v2 = phi i64* [ %v0, %b0 ], [ %v21, %b1 ]
+  %v3 = phi i64* [ %v1, %b0 ], [ %v22, %b1 ]
+  %v4 = phi i32 [ 0, %b0 ], [ %v19, %b1 ]
+  %v5 = phi i32 [ 0, %b0 ], [ %v14, %b1 ]
+  %v6 = phi i32 [ 0, %b0 ], [ %v18, %b1 ]
+  %v7 = load i64, i64* %v2, align 8
+  %v8 = trunc i64 %v7 to i32
+  %v9 = lshr i64 %v7, 32
+  %v10 = load i64, i64* %v3, align 8
+  %v11 = trunc i64 %v10 to i32
+  %v12 = lshr i64 %v10, 32
+  %v13 = mul nsw i32 %v11, %v8
+  %v14 = add nsw i32 %v13, %v5
+  %v15 = trunc i64 %v9 to i32
+  %v16 = trunc i64 %v12 to i32
+  %v17 = mul nsw i32 %v16, %v15
+  %v18 = add nsw i32 %v17, %v6
+  %v19 = add nsw i32 %v4, 1
+  %v20 = icmp eq i32 %v19, 199
+  %v21 = getelementptr i64, i64* %v2, i32 1
+  %v22 = getelementptr i64, i64* %v3, i32 1
+  br i1 %v20, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  %v23 = add nsw i32 %v14, %v18
+  ret i32 %v23
+}
+
+attributes #0 = { nounwind readonly }




More information about the llvm-commits mailing list