[llvm] r327271 - [Hexagon] Add more lit tests

Krzysztof Parzyszek via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 12 07:01:30 PDT 2018


Added: llvm/trunk/test/CodeGen/Hexagon/stack-guard-acceptable-type.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/stack-guard-acceptable-type.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/stack-guard-acceptable-type.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/stack-guard-acceptable-type.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,33 @@
+; Check that we accept a user definition/declaration of __stack_chk_guard
+; that is not the expected type (i8*) but one of the same size.
+;
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; CHECK: __stack_chk_fail
+
+target triple = "hexagon"
+
+@__stack_chk_guard = external global i32, align 4
+@g0 = private unnamed_addr constant [37 x i8] c"This string is longer than 16 bytes\0A\00", align 1
+
+; Function Attrs: noinline nounwind ssp
+define zeroext i8 @f0(i32 %a0) #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca [64 x i8], align 8
+  %v2 = alloca i8*, align 4
+  store i32 %a0, i32* %v0, align 4
+  store i8* getelementptr inbounds ([37 x i8], [37 x i8]* @g0, i32 0, i32 0), i8** %v2, align 4
+  %v3 = getelementptr inbounds [64 x i8], [64 x i8]* %v1, i32 0, i32 0
+  %v4 = load i8*, i8** %v2, align 4
+  %v5 = call i8* @f1(i8* %v3, i8* %v4) #1
+  %v6 = load i32, i32* %v0, align 4
+  %v7 = getelementptr inbounds [64 x i8], [64 x i8]* %v1, i32 0, i32 %v6
+  %v8 = load i8, i8* %v7, align 1
+  ret i8 %v8
+}
+
+; Function Attrs: nounwind
+declare i8* @f1(i8*, i8*) #1
+
+attributes #0 = { noinline nounwind ssp }
+attributes #1 = { nounwind }
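
For reference, the guard symbol that the stack protector normally expects is declared as i8*; the point of the test above is that a declaration with a different type of the same size (i32, 4 bytes on hexagon) is accepted as well. The module below is a hypothetical reference showing the conventional declaration, for contrast only; it is not part of this commit.

  ; Hypothetical reference module (not part of this commit): the conventional
  ; guard declaration uses i8*. The test above instead declares the guard as
  ; i32, which has the same 4-byte size on hexagon and is also accepted.
  target triple = "hexagon"

  @__stack_chk_guard = external global i8*, align 4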

Added: llvm/trunk/test/CodeGen/Hexagon/store-AbsSet.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/store-AbsSet.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/store-AbsSet.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/store-AbsSet.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,67 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Validates correct operand order for absolute-set stores.
+
+%s.0 = type { %s.1, %s.2, %s.3, %s.3, %s.4, [8 x i8] }
+%s.1 = type { i8 }
+%s.2 = type { i8 }
+%s.3 = type { i32 }
+%s.4 = type { i32, i32, i32 }
+
+; Function Attrs: nounwind ssp
+define void @f0(%s.0* nocapture readonly %a0, i32 %a1) #0 {
+b0:
+  %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 0, i32 0
+  %v1 = load i8, i8* %v0, align 1
+  %v2 = and i32 %a1, 1
+  %v3 = icmp eq i32 %v2, 0
+  br i1 %v3, label %b4, label %b1
+
+b1:                                               ; preds = %b0
+  %v4 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 1, i32 0
+  %v5 = load i8, i8* %v4, align 1
+  %v6 = icmp eq i8 %v5, 0
+  br i1 %v6, label %b3, label %b2
+
+b2:                                               ; preds = %b1
+  %v7 = getelementptr %s.0, %s.0* %a0, i32 0, i32 2, i32 0
+  %v8 = load i32, i32* %v7, align 4
+  store volatile i32 %v8, i32* inttoptr (i32 -318766672 to i32*), align 16
+  %v9 = getelementptr %s.0, %s.0* %a0, i32 0, i32 3, i32 0
+  %v10 = load i32, i32* %v9, align 4
+  store volatile i32 %v10, i32* inttoptr (i32 -318766672 to i32*), align 16
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  %v11 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 4, i32 0
+  %v12 = load i32, i32* %v11, align 4
+  %v13 = zext i8 %v1 to i32
+  %v14 = mul nsw i32 %v13, 64
+  %v15 = add nsw i32 %v14, -318111684
+  %v16 = inttoptr i32 %v15 to i32*
+  store volatile i32 %v12, i32* %v16, align 4
+  %v17 = shl i32 1, %v13
+  %v18 = load volatile i32, i32* inttoptr (i32 -318111596 to i32*), align 4
+  %v19 = and i32 %v17, 3
+  %v20 = xor i32 %v19, 3
+  %v21 = and i32 %v18, %v20
+  %v22 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 4, i32 1
+  %v23 = load i32, i32* %v22, align 4
+  %v24 = and i32 %v23, 1
+  %v25 = shl i32 %v24, %v13
+  %v26 = or i32 %v25, %v21
+  store volatile i32 %v26, i32* inttoptr (i32 -318111596 to i32*), align 4
+  %v27 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 4, i32 2
+  %v28 = load i32, i32* %v27, align 4
+  %v29 = mul nsw i32 %v13, 4
+  %v30 = add nsw i32 %v29, -318111592
+  %v31 = inttoptr i32 %v30 to i32*
+  store volatile i32 %v28, i32* %v31, align 4
+  br label %b4
+
+b4:                                               ; preds = %b3, %b0
+  ret void
+}
+
+attributes #0 = { nounwind ssp }

Added: llvm/trunk/test/CodeGen/Hexagon/store-abs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/store-abs.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/store-abs.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/store-abs.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,75 @@
+; RUN: llc -march=hexagon -O3 -hexagon-small-data-threshold=0 < %s | FileCheck %s
+; This lit test validates that a truncating store of a 64-bit value picks the
+; store-absolute pattern instead of the base + index store pattern. This allows
+; the constant-extender optimization pass to move the immediate value into a
+; register and replace all uses of the constant when there are more than two
+; uses. Generating the absolute pattern for a truncated 64-bit value also
+; avoids an extra move.
+
+@g0 = external global i8, align 8
+@g1 = external global i16, align 8
+@g2 = external global i32, align 8
+
+; CHECK-LABEL: f0:
+; CHECK: memd(##441656) = r{{[0-9]+}}
+define void @f0(i64 %a0) #0 {
+b0:
+  store volatile i64 %a0, i64* inttoptr (i32 441656 to i64*)
+  ret void
+}
+
+; CHECK-LABEL: f1:
+; CHECK: memw(##441656) = r{{[0-9]+}}
+define void @f1(i64 %a0) #0 {
+b0:
+  %v0 = trunc i64 %a0 to i32
+  store volatile i32 %v0, i32* inttoptr (i32 441656 to i32*)
+  ret void
+}
+
+; CHECK-LABEL: f2:
+; CHECK: memh(##441656) = r{{[0-9]+}}
+define void @f2(i64 %a0) #0 {
+b0:
+  %v0 = trunc i64 %a0 to i16
+  store volatile i16 %v0, i16* inttoptr (i32 441656 to i16*)
+  ret void
+}
+
+; CHECK-LABEL: f3:
+; CHECK: memb(##441656) = r{{[0-9]+}}
+define void @f3(i64 %a0) #0 {
+b0:
+  %v0 = trunc i64 %a0 to i8
+  store volatile i8 %v0, i8* inttoptr (i32 441656 to i8*)
+  ret void
+}
+
+; CHECK-LABEL: f4:
+; CHECK: memw(##g2) = r{{[0-9]+}}
+define void @f4(i64 %a0) #0 {
+b0:
+  %v0 = trunc i64 %a0 to i32
+  store volatile i32 %v0, i32* @g2
+  ret void
+}
+
+; CHECK-LABEL: f5:
+; CHECK: memh(##g1) = r{{[0-9]+}}
+define void @f5(i64 %a0) #0 {
+b0:
+  %v0 = trunc i64 %a0 to i16
+  store volatile i16 %v0, i16* @g1
+  ret void
+}
+
+; CHECK-LABEL: f6:
+; CHECK: memb(##g0) = r{{[0-9]+}}
+define void @f6(i64 %a0) #0 {
+b0:
+  %v0 = trunc i64 %a0 to i8
+  store volatile i8 %v0, i8* @g0
+  ret void
+}
+
+attributes #0 = { nounwind }
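
The constant-extender behavior described in the comment of store-abs.ll above can be pictured with a small sketch. The IR below is hypothetical and not part of the commit; the function name and the assembly fragments in the comments are illustrative assumptions, and the exact output depends on the pass's use-count threshold.

  ; Hypothetical sketch (not part of this commit): the same extended immediate
  ; address is stored to three times. With the store-absolute pattern each
  ; store initially needs the extended constant ##441656; since the constant
  ; has more than two uses, the constant-extender optimization may materialize
  ; it once in a register and use base+offset stores, roughly
  ;   r3 = ##441656
  ;   memw(r3+#0) = r0
  ; instead of repeating "memw(##441656) = ..." for every store.
  target triple = "hexagon"

  define void @abs_uses(i32 %a0, i32 %a1, i32 %a2) {
  b0:
    store volatile i32 %a0, i32* inttoptr (i32 441656 to i32*)
    store volatile i32 %a1, i32* inttoptr (i32 441656 to i32*)
    store volatile i32 %a2, i32* inttoptr (i32 441656 to i32*)
    ret void
  }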

Added: llvm/trunk/test/CodeGen/Hexagon/store-constant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/store-constant.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/store-constant.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/store-constant.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,51 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+;
+; Generate stores with assignment of constant values.
+
+; CHECK: memw{{.*}} = {{.*}}#0
+; CHECK: memw{{.*}} = {{.*}}#1
+; CHECK: memh{{.*}} = {{.*}}#2
+; CHECK: memh{{.*}} = {{.*}}#3
+; CHECK: memb{{.*}} = {{.*}}#4
+; CHECK: memb{{.*}} = {{.*}}#5
+
+define void @f0(i32* nocapture %a0) #0 {
+b0:
+  store i32 0, i32* %a0, align 4
+  ret void
+}
+
+define void @f1(i32* nocapture %a0) #0 {
+b0:
+  %v0 = getelementptr inbounds i32, i32* %a0, i32 1
+  store i32 1, i32* %v0, align 4
+  ret void
+}
+
+define void @f2(i16* nocapture %a0) #0 {
+b0:
+  store i16 2, i16* %a0, align 2
+  ret void
+}
+
+define void @f3(i16* nocapture %a0) #0 {
+b0:
+  %v0 = getelementptr inbounds i16, i16* %a0, i32 2
+  store i16 3, i16* %v0, align 2
+  ret void
+}
+
+define void @f4(i8* nocapture %a0) #0 {
+b0:
+  store i8 4, i8* %a0, align 1
+  ret void
+}
+
+define void @f5(i8* nocapture %a0) #0 {
+b0:
+  %v0 = getelementptr inbounds i8, i8* %a0, i32 2
+  store i8 5, i8* %v0, align 1
+  ret void
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/store-imm-byte.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/store-imm-byte.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/store-imm-byte.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/store-imm-byte.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,13 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: memb{{.*}} = #-1
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i8* %a0) #0 {
+b0:
+  store i8 -1, i8* %a0, align 2
+  ret void
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/store-imm-halword.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/store-imm-halword.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/store-imm-halword.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/store-imm-halword.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,13 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: memh{{.*}} = #-1
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i16* %a0) #0 {
+b0:
+  store i16 -1, i16* %a0, align 2
+  ret void
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/store-imm-word.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/store-imm-word.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/store-imm-word.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/store-imm-word.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,13 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: memw{{.*}} = #-1
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i32* %a0) #0 {
+b0:
+  store i32 -1, i32* %a0, align 4
+  ret void
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/store-widen-subreg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/store-widen-subreg.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/store-widen-subreg.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/store-widen-subreg.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,59 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that the store widening optimization correctly transforms adjacent
+; narrow stores into a wider instruction using a subregister. Store widening
+; now happens in the DAG combiner, so this test no longer fails.
+
+; CHECK: memh({{r[0-9]+}}+#{{[0-9]+}}) =
+
+%s.0 = type { %s.1, %s.2, %s.3*, %s.0*, i32, i8, i8, i32, i8, i8, i32, i32, i8, i32, %s.4*, [2 x %s.4*], %s.13, i8*, %s.15*, %s.26, i32, i32, i32 }
+%s.1 = type { i64, [8 x i8] }
+%s.2 = type { i64*, i32, i8 }
+%s.3 = type { %s.1, %s.26, %s.26, i32, i32, i32, void (%s.1*)*, void (%s.1*)*, i32 (%s.1*)*, void (%s.1*)*, i32, i64* }
+%s.4 = type { %s.5, %s.12 }
+%s.5 = type { i32, i32, i32, i32, i32, i32, i32, i32, %s.6 }
+%s.6 = type { %s.7 }
+%s.7 = type { i32, i32, %s.8, %s.9, i32, [4 x %s.10], %s.11 }
+%s.8 = type { i32, i32, i32 }
+%s.9 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+%s.10 = type { i32, i32 }
+%s.11 = type { i32, i32, i32, i32, i32, i32, i32, i32 }
+%s.12 = type { i32, i32, i32, i32, i32, i32, i32 }
+%s.13 = type { i32, i32, i32, %s.14*, %s.14*, %s.14*, %s.14*, i32 }
+%s.14 = type { %s.14*, i8, i32, %s.4*, i32, %s.4* }
+%s.15 = type { %s.16, %s.17, %s.19, %s.20, %s.21, %s.24 }
+%s.16 = type { i64, i64, i64, i32 }
+%s.17 = type { i16, i16, i8, [4 x %s.18], i8, i8 }
+%s.18 = type { i32, i32 }
+%s.19 = type { i32*, i32, i32* }
+%s.20 = type { i8, i8, i32, i32, i8, i32, i32, i32, i32, i32 }
+%s.21 = type { i32, %s.22 }
+%s.22 = type { %s.23 }
+%s.23 = type { i32, i32, i32, i32, i32, i32, i32 }
+%s.24 = type { %s.25 }
+%s.25 = type { i32, i32, i32, i32, i32, i32, i32 }
+%s.26 = type { %s.27 }
+%s.27 = type { i16, i16, i32, i32, i32 }
+
+; Function Attrs: nounwind
+define hidden void @f0() local_unnamed_addr #0 {
+b0:
+  %v0 = load i64, i64* undef, align 8
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  %v1 = trunc i64 %v0 to i32
+  %v2 = inttoptr i32 %v1 to %s.0*
+  %v3 = getelementptr inbounds %s.0, %s.0* %v2, i32 0, i32 8
+  store i8 0, i8* %v3, align 8
+  %v4 = getelementptr inbounds %s.0, %s.0* %v2, i32 0, i32 9
+  store i8 1, i8* %v4, align 1
+  %v5 = getelementptr inbounds %s.0, %s.0* %v2, i32 0, i32 6
+  store i8 1, i8* %v5, align 1
+  unreachable
+
+b2:                                               ; preds = %b0
+  unreachable
+}
+
+attributes #0 = { nounwind }
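
The widening referred to in the comment of store-widen-subreg.ll merges adjacent narrow stores into one wider store when the alignment permits. The IR below is a minimal, hypothetical sketch of the same pattern; the function name and the widened encoding quoted in the comment are illustrative assumptions, not output guaranteed by this test.

  ; Hypothetical sketch (not part of this commit): two byte stores to adjacent
  ; addresses within an aligned halfword. Store widening may merge them into a
  ; single halfword store of 0x0100 (hexagon is little-endian, so the low byte
  ; holds 0 and the high byte holds 1), i.e. one "memh(...) = #256" instead of
  ; two memb stores.
  target triple = "hexagon"

  define void @widen_pair(i8* %a0) {
  b0:
    store i8 0, i8* %a0, align 2
    %v0 = getelementptr inbounds i8, i8* %a0, i32 1
    store i8 1, i8* %v0, align 1
    ret void
  }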

Added: llvm/trunk/test/CodeGen/Hexagon/store1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/store1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/store1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/store1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,39 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+;
+; Check that the immediate forms of the store instructions are generated.
+;
+; CHECK: memw(r{{[0-9]+}}+#156) = #0
+; CHECK: memw(r{{[0-9]+}}+#160) = ##g0+144
+; CHECK: memw(r{{[0-9]+}}+#172) = ##f3
+
+%s.0 = type { [156 x i8], i8*, i8*, i8, i8*, void (i8*)*, i8 }
+
+@g0 = common global %s.0 zeroinitializer, align 4
+
+; Function Attrs: nounwind
+define void @f0(%s.0* %a0) #0 {
+b0:
+  %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 1
+  store i8* null, i8** %v0, align 4
+  ret void
+}
+
+; Function Attrs: nounwind
+define void @f1(%s.0* %a0) #0 {
+b0:
+  %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 2
+  store i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 0, i32 144), i8** %v0, align 4
+  ret void
+}
+
+; Function Attrs: nounwind
+define void @f2(%s.0* %a0) #0 {
+b0:
+  %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 5
+  store void (i8*)* @f3, void (i8*)** %v0, align 4
+  ret void
+}
+
+declare void @f3(i8*)
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/store_abs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/store_abs.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/store_abs.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/store_abs.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,27 @@
+; RUN: llc -march=hexagon -O3 < %s
+; REQUIRES: asserts
+
+; Test that the compiler doesn't assert when attempting to
+; generate a store absolute-set instruction where the base
+; register and destination register are the same.
+
+target triple = "hexagon-unknown--elf"
+
+%s.0 = type { %s.1, %s.2 }
+%s.1 = type { %s.1*, %s.1* }
+%s.2 = type { %s.3 }
+%s.3 = type { %s.4 }
+%s.4 = type { %s.5, i32, i32, i8* }
+%s.5 = type { i32 }
+
+@g0 = external global %s.0, align 4
+
+; Function Attrs: nounwind
+define void @f0() #0 section ".init.text" {
+b0:
+  store %s.1* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 0), %s.1** getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 0, i32 0), align 4
+  store %s.1* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 0), %s.1** getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 0, i32 1), align 4
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/struct-const.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/struct-const.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/struct-const.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/struct-const.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,72 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Look for only one declaration of the const struct.
+; CHECK: g0:
+; CHECK-NOT: g02:
+
+target triple = "hexagon"
+
+%s.8 = type { %s.9, i8*, i8* }
+%s.9 = type { i16, i16, i32 }
+%s.0 = type { i32, %s.1*, %s.1*, i32, i32, i32, i32, i32, {}*, void (i8*)*, void (i8*)*, i8*, [32 x %s.4], i32, i16, i16, i16, i16, [16 x %s.7], i16 }
+%s.1 = type { i16, i8, i8, i32, %s.2 }
+%s.2 = type { %s.3, i8* }
+%s.3 = type { i8* }
+%s.4 = type { %s.5*, %s.5*, i16, i32 }
+%s.5 = type { %s.6, %s.5* }
+%s.6 = type { i16, i8, i8, i32 }
+%s.7 = type { %s.1*, i32 }
+%s.11 = type { i32, %s.12* }
+%s.12 = type opaque
+
+@g0 = internal constant %s.8 { %s.9 { i16 531, i16 0, i32 16 }, i8* getelementptr inbounds ([48 x i8], [48 x i8]* @g1, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @g2, i32 0, i32 0) }, align 4
+@g1 = private unnamed_addr constant [48 x i8] c"In task 0x%x, Assertion heap_ptr != NULL failed\00", align 8
+@g2 = private unnamed_addr constant [10 x i8] c"xxxxxxx.c\00", align 8
+
+; Function Attrs: nounwind
+define void @f0(%s.0* %a0) #0 {
+b0:
+  %v0 = icmp eq %s.0* %a0, null
+  br i1 %v0, label %b1, label %b4
+
+b1:                                               ; preds = %b0
+  %v1 = tail call %s.11* @f1() #0
+  %v2 = icmp eq %s.11* %v1, null
+  br i1 %v2, label %b3, label %b2
+
+b2:                                               ; preds = %b1
+  %v3 = ptrtoint %s.11* %v1 to i32
+  tail call void @f2(%s.8* @g0, i32 %v3, i32 0, i32 0) #0
+  br label %b5
+
+b3:                                               ; preds = %b1
+  tail call void @f3(%s.8* @g0) #0
+  br label %b5
+
+b4:                                               ; preds = %b0
+  %v4 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 9
+  store void (i8*)* @f4, void (i8*)** %v4, align 4, !tbaa !0
+  %v5 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 10
+  store void (i8*)* @f5, void (i8*)** %v5, align 4, !tbaa !0
+  br label %b5
+
+b5:                                               ; preds = %b4, %b3, %b2
+  ret void
+}
+
+declare %s.11* @f1()
+
+declare void @f2(%s.8*, i32, i32, i32)
+
+declare void @f3(%s.8*)
+
+declare void @f4(i8*)
+
+declare void @f5(i8*)
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"any pointer", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/struct_copy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/struct_copy.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/struct_copy.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/struct_copy.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,82 @@
+; RUN: llc -march=hexagon -hexagon-small-data-threshold=0 < %s | FileCheck %s
+; Disable small-data, otherwise g3 will end up in .sdata. While that is not a
+; problem, this test was originally written with g3 not placed there, so keep
+; it that way.
+
+%s.0 = type { i32, i32, i32, i32, i32, i32 }
+%s.1 = type { i64, i64, i64, i64, i64, i64 }
+%s.2 = type { i16, i16, i16, i16, i16, i16 }
+%s.3 = type { i8, i8, i8, i8, i8, i8 }
+
+@g0 = external global %s.0
+@g1 = external global %s.1
+@g2 = external global %s.2
+@g3 = external global %s.3
+
+; CHECK-LABEL: f0:
+; CHECK: [[REG1:(r[0-9]+)]] = {{[#]+}}g0
+; CHECK: r{{[0-9]+}} = memw([[REG1]]+#{{[0-9]+}})
+; CHECK-NOT: = memd
+; CHECK: dealloc_return
+define i32 @f0() #0 {
+b0:
+  %v0 = alloca %s.0, align 4
+  %v1 = bitcast %s.0* %v0 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %v1, i8* align 4 bitcast (%s.0* @g0 to i8*), i32 24, i1 false)
+  call void @f1(%s.0* %v0) #0
+  ret i32 0
+}
+
+declare void @f1(%s.0*)
+
+; CHECK-LABEL: f2:
+; CHECK: [[REG2:(r[0-9]+)]] = {{[#]+}}g1
+; CHECK: r{{[0-9]+}}:{{[0-9]+}} = memd([[REG2]]+#{{[0-9]+}})
+; CHECK: dealloc_return
+define i32 @f2() #0 {
+b0:
+  %v0 = alloca %s.1, align 8
+  %v1 = bitcast %s.1* %v0 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v1, i8* align 8 bitcast (%s.1* @g1 to i8*), i32 48, i1 false)
+  call void @f3(%s.1* %v0) #0
+  ret i32 0
+}
+
+declare void @f3(%s.1*)
+
+; CHECK-LABEL: f4:
+; CHECK: [[REG1:(r[0-9]+)]] = {{[#]+}}g2
+; CHECK: r{{[0-9]+}} = mem{{u?}}h([[REG1]]+#{{[0-9]+}})
+; CHECK-NOT: = memd
+; CHECK: dealloc_return
+define i32 @f4() #0 {
+b0:
+  %v0 = alloca %s.2, align 2
+  %v1 = bitcast %s.2* %v0 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 %v1, i8* align 2 bitcast (%s.2* @g2 to i8*), i32 12, i1 false)
+  call void @f5(%s.2* %v0) #0
+  ret i32 0
+}
+
+declare void @f5(%s.2*)
+
+; CHECK-LABEL: f6:
+; CHECK: [[REG1:(r[0-9]+)]] = {{[#]+}}g3
+; CHECK: r{{[0-9]+}} = mem{{u?}}b([[REG1]]+#{{[0-9]+}})
+; CHECK-NOT: = memw
+; CHECK: dealloc_return
+define i32 @f6() #0 {
+b0:
+  %v0 = alloca %s.3, align 1
+  %v1 = getelementptr inbounds %s.3, %s.3* %v0, i32 0, i32 0
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %v1, i8* align 1 getelementptr inbounds (%s.3, %s.3* @g3, i32 0, i32 0), i32 6, i1 false)
+  call void @f7(%s.3* %v0) #0
+  ret i32 0
+}
+
+declare void @f7(%s.3*)
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { argmemonly nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/struct_copy_sched_r16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/struct_copy_sched_r16.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/struct_copy_sched_r16.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/struct_copy_sched_r16.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,28 @@
+; RUN: llc -march=hexagon -hexagon-small-data-threshold=0 < %s | FileCheck %s
+; Disable small-data, or the test will need to be modified to account for g0
+; being placed there.
+
+%s.3 = type { i8, i8, i8, i8, i8, i8 }
+
+@g0 = external global %s.3
+
+; CHECK: [[REG1:(r[0-9]+)]] = {{[#]+}}g0
+; CHECK: r{{[0-9]+}} = mem{{u?}}b([[REG1]]+#{{[0-9]+}})
+; CHECK: r0 = #0
+; CHECK: dealloc_return
+define i32 @f0() #0 {
+b0:
+  %v0 = alloca %s.3, align 1
+  %v1 = getelementptr inbounds %s.3, %s.3* %v0, i32 0, i32 0
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %v1, i8* align 1 getelementptr inbounds (%s.3, %s.3* @g0, i32 0, i32 0), i32 6, i1 false)
+  call void @f1(%s.3* %v0) #0
+  ret i32 0
+}
+
+declare void @f1(%s.3*)
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { argmemonly nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/sub-add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/sub-add.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/sub-add.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/sub-add.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,90 @@
+; RUN: llc -march=hexagon -enable-timing-class-latency=true < %s | FileCheck -check-prefix=CHECK-ONE %s
+; REQUIRES: asserts
+; Check that there is no assert when enabling -enable-timing-class-latency.
+; CHECK-ONE: f0:
+
+; RUN: llc -march=hexagon < %s | FileCheck -check-prefix=CHECK %s
+; CHECK: add(r{{[0-9]*}},sub(#1,r{{[0-9]*}})
+; CHECK: call f1
+
+target triple = "hexagon"
+
+%s.0 = type { i16, i16, i16, i16*, i16, i16, i16, i16, i16, i16, i32, i32, i16, %s.1*, i16, %s.2*, i16, %s.3*, i16*, i16*, i16, i16*, i16*, i16, i16*, i16, i16*, i8*, %s.5, %s.4, %s.5, %s.5, i32, i32, i32, %s.6, i32, i32, i16, %s.7, %s.7 }
+%s.1 = type { i16, i16, i16, i16 }
+%s.2 = type { i16, i16 }
+%s.3 = type { i16, i16, i16, i16 }
+%s.4 = type { i32, i32, i16* }
+%s.5 = type { i32, i32, i32* }
+%s.6 = type { i16, i16, i16, i16 }
+%s.7 = type { i16, i16, i16, i16, i16, i16*, i16*, i16*, i8*, i16*, i16*, i16*, i8* }
+
+; Function Attrs: nounwind
+define i32 @f0(%s.0* %a0) #0 {
+b0:
+  %v0 = alloca i16, align 2
+  %v1 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 12
+  %v2 = load i16, i16* %v1, align 2, !tbaa !0
+  %v3 = icmp sgt i16 %v2, 0
+  br i1 %v3, label %b1, label %b9
+
+b1:                                               ; preds = %b0
+  %v4 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 17
+  %v5 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 29
+  br label %b2
+
+b2:                                               ; preds = %b7, %b1
+  %v6 = phi i16 [ %v2, %b1 ], [ %v23, %b7 ]
+  %v7 = phi i32 [ 0, %b1 ], [ %v25, %b7 ]
+  %v8 = phi i16 [ 1, %b1 ], [ %v26, %b7 ]
+  %v9 = load %s.3*, %s.3** %v4, align 4, !tbaa !4
+  %v10 = getelementptr inbounds %s.3, %s.3* %v9, i32 %v7, i32 0
+  %v11 = load i16, i16* %v10, align 2, !tbaa !0
+  %v12 = getelementptr inbounds %s.3, %s.3* %v9, i32 %v7, i32 1
+  %v13 = load i16, i16* %v12, align 2, !tbaa !0
+  %v14 = icmp sgt i16 %v11, %v13
+  br i1 %v14, label %b6, label %b3
+
+b3:                                               ; preds = %b2
+  %v15 = sext i16 %v11 to i32
+  %v16 = sext i16 %v13 to i32
+  %v17 = add i32 %v16, 1
+  br label %b4
+
+b4:                                               ; preds = %b4, %b3
+  %v18 = phi i32 [ %v15, %b3 ], [ %v20, %b4 ]
+  %v19 = call i32 bitcast (i32 (...)* @f1 to i32 (%s.4*, i32, i32, i16*)*)(%s.4* %v5, i32 %v7, i32 undef, i16* %v0) #0
+  %v20 = add i32 %v18, 1
+  %v21 = icmp eq i32 %v20, %v17
+  br i1 %v21, label %b5, label %b4
+
+b5:                                               ; preds = %b4
+  %v22 = load i16, i16* %v1, align 2, !tbaa !0
+  br label %b6
+
+b6:                                               ; preds = %b5, %b2
+  %v23 = phi i16 [ %v22, %b5 ], [ %v6, %b2 ]
+  %v24 = icmp slt i16 %v8, %v23
+  br i1 %v24, label %b7, label %b8
+
+b7:                                               ; preds = %b6
+  %v25 = sext i16 %v8 to i32
+  %v26 = add i16 %v8, 1
+  br label %b2
+
+b8:                                               ; preds = %b6
+  br label %b9
+
+b9:                                               ; preds = %b8, %b0
+  ret i32 0
+}
+
+declare i32 @f1(...)
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"any pointer", !2}

Added: llvm/trunk/test/CodeGen/Hexagon/subh-shifted.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/subh-shifted.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/subh-shifted.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/subh-shifted.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,24 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: r{{[0-9]+}} = sub(r{{[0-9]+}}.{{L|l}},r{{[0-9]+}}.{{L|l}}):<<16
+
+; Function Attrs: nounwind readnone
+define i64 @f0(i64 %a0, i16 zeroext %a1, i16 zeroext %a2) #0 {
+b0:
+  %v0 = zext i16 %a1 to i32
+  %v1 = zext i16 %a2 to i32
+  %v2 = sub nsw i32 %v0, %v1
+  %v3 = shl i32 %v2, 16
+  %v4 = icmp slt i32 %v3, 65536
+  %v5 = ashr exact i32 %v3, 16
+  %v6 = select i1 %v4, i32 1, i32 %v5
+  %v7 = icmp sgt i32 %v6, 4
+  %v8 = add i32 %v6, 65535
+  %v9 = shl i64 %a0, 2
+  %v10 = and i32 %v8, 65535
+  %v11 = zext i32 %v10 to i64
+  %v12 = select i1 %v7, i64 3, i64 %v11
+  %v13 = or i64 %v12, %v9
+  ret i64 %v13
+}
+
+attributes #0 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/subh.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/subh.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/subh.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/subh.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,24 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: r{{[0-9]+}} = sub(r{{[0-9]+}}.{{L|l}},r{{[0-9]+}}.{{L|l}})
+
+; Function Attrs: nounwind readnone
+define i64 @f0(i64 %a0, i16 zeroext %a1, i16 zeroext %a2) #0 {
+b0:
+  %v0 = zext i16 %a1 to i32
+  %v1 = zext i16 %a2 to i32
+  %v2 = sub nsw i32 %v0, %v1
+  %v3 = shl i32 %v2, 16
+  %v4 = icmp slt i32 %v3, 65536
+  %v5 = ashr exact i32 %v3, 16
+  %v6 = select i1 %v4, i32 1, i32 %v5
+  %v7 = icmp sgt i32 %v6, 4
+  %v8 = add i32 %v6, 65535
+  %v9 = shl i64 %a0, 2
+  %v10 = and i32 %v8, 65535
+  %v11 = zext i32 %v10 to i64
+  %v12 = select i1 %v7, i64 3, i64 %v11
+  %v13 = or i64 %v12, %v9
+  ret i64 %v13
+}
+
+attributes #0 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/swiz.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swiz.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swiz.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swiz.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,51 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: swiz
+
+target triple = "hexagon"
+
+%s.0 = type { [4 x i32], [2 x i32], [64 x i8] }
+
+; Function Attrs: nounwind
+define void @f0(%s.0* nocapture %a0, i8* nocapture %a1, i32 %a2) #0 {
+b0:
+  %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 2, i32 0
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v1 = phi i16 [ 16, %b0 ], [ %v22, %b1 ]
+  %v2 = phi i8* [ %v0, %b0 ], [ %v21, %b1 ]
+  %v3 = getelementptr inbounds i8, i8* %v2, i32 3
+  %v4 = load i8, i8* %v3, align 1, !tbaa !0
+  %v5 = zext i8 %v4 to i32
+  %v6 = shl nuw nsw i32 %v5, 8
+  %v7 = getelementptr inbounds i8, i8* %v2, i32 2
+  %v8 = load i8, i8* %v7, align 1, !tbaa !0
+  %v9 = zext i8 %v8 to i32
+  %v10 = or i32 %v6, %v9
+  %v11 = shl nuw i32 %v10, 16
+  %v12 = getelementptr inbounds i8, i8* %v2, i32 1
+  %v13 = load i8, i8* %v12, align 1, !tbaa !0
+  %v14 = zext i8 %v13 to i32
+  %v15 = shl nuw nsw i32 %v14, 8
+  %v16 = load i8, i8* %v2, align 1, !tbaa !0
+  %v17 = zext i8 %v16 to i32
+  %v18 = or i32 %v11, %v15
+  %v19 = or i32 %v18, %v17
+  %v20 = bitcast i8* %v2 to i32*
+  store i32 %v19, i32* %v20, align 4, !tbaa !3
+  %v21 = getelementptr inbounds i8, i8* %v2, i32 4
+  %v22 = add i16 %v1, -1
+  %v23 = icmp eq i16 %v22, 0
+  br i1 %v23, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  ret void
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}
+!3 = !{!4, !4, i64 0}
+!4 = !{!"int", !1}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-badorder.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-badorder.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-badorder.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-badorder.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,38 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; Function Attrs: nounwind
+define void @f0(i32* nocapture %a0) #0 {
+b0:
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i64 [ %v9, %b1 ], [ 0, %b0 ]
+  %v1 = phi i32 [ %v10, %b1 ], [ 0, %b0 ]
+  %v2 = getelementptr inbounds i32, i32* %a0, i32 %v1
+  %v3 = load i32, i32* %v2, align 4, !tbaa !0
+  %v4 = zext i32 %v3 to i64
+  %v5 = load i32, i32* undef, align 4, !tbaa !0
+  %v6 = zext i32 %v5 to i64
+  %v7 = shl nuw i64 %v6, 32
+  %v8 = or i64 %v7, %v4
+  %v9 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v0, i64 %v8, i64 %v8)
+  %v10 = add nsw i32 %v1, 4
+  %v11 = icmp slt i32 %v10, undef
+  br i1 %v11, label %b1, label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v12 = phi i64 [ 0, %b0 ], [ %v9, %b1 ]
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M2.vdmacs.s0(i64, i64, i64) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-chain-refs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-chain-refs.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-chain-refs.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-chain-refs.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,65 @@
+; RUN: llc -march=hexagon -enable-pipeliner=true -stats -o /dev/null < %s \
+; RUN:      2>&1 | FileCheck %s --check-prefix=STATS
+
+; Test that we do not schedule chained references too far apart, which would
+; allow the loop to be pipelined. In this test, the loop should not be
+; pipelined when the chained references are constrained correctly.
+
+; STATS-NOT: 1 pipeliner   - Number of loops software pipelined
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br label %b2
+
+b1:                                               ; preds = %b6
+  br label %b7
+
+b2:                                               ; preds = %b6, %b0
+  br label %b3
+
+b3:                                               ; preds = %b5, %b2
+  br i1 undef, label %b4, label %b5
+
+b4:                                               ; preds = %b3
+  br label %b5
+
+b5:                                               ; preds = %b4, %b3
+  br i1 undef, label %b3, label %b6
+
+b6:                                               ; preds = %b5
+  br i1 undef, label %b1, label %b2
+
+b7:                                               ; preds = %b7, %b1
+  %v0 = phi i32 [ 0, %b1 ], [ %v4, %b7 ]
+  %v1 = load i16, i16* undef, align 8, !tbaa !0
+  %v2 = icmp sgt i16 %v1, undef
+  %v3 = select i1 %v2, i16 4, i16 undef
+  store i16 %v3, i16* undef, align 2, !tbaa !0
+  store i16 -32768, i16* undef, align 2, !tbaa !0
+  %v4 = add i32 %v0, 1
+  %v5 = icmp eq i32 %v4, 5
+  br i1 %v5, label %b8, label %b7
+
+b8:                                               ; preds = %b7
+  br i1 undef, label %b9, label %b10
+
+b9:                                               ; preds = %b8
+  br label %b10
+
+b10:                                              ; preds = %b9, %b8
+  br i1 undef, label %b11, label %b12
+
+b11:                                              ; preds = %b10
+  unreachable
+
+b12:                                              ; preds = %b10
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-change-dep-cycle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-change-dep-cycle.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-change-dep-cycle.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-change-dep-cycle.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,42 @@
+; RUN: llc -march=hexagon -O3 < %s
+; REQUIRES: asserts
+
+; Don't change the dependences if it's going to cause a cycle.
+
+; Function Attrs: nounwind
+define void @f0(i8* nocapture %a0, i32 %a1) #0 {
+b0:
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i8* [ undef, %b1 ], [ undef, %b0 ]
+  %v1 = phi i32 [ %v20, %b1 ], [ 1, %b0 ]
+  %v2 = phi i8* [ %v6, %b1 ], [ %a0, %b0 ]
+  %v3 = load i8, i8* %v2, align 1
+  %v4 = zext i8 %v3 to i32
+  %v5 = mul nsw i32 %v4, 3
+  %v6 = getelementptr inbounds i8, i8* %v2, i32 1
+  %v7 = load i8, i8* %v6, align 1
+  %v8 = zext i8 %v7 to i32
+  %v9 = add i32 %v8, 2
+  %v10 = add i32 %v9, %v5
+  %v11 = lshr i32 %v10, 2
+  %v12 = trunc i32 %v11 to i8
+  %v13 = getelementptr inbounds i8, i8* undef, i32 2
+  store i8 %v12, i8* %v0, align 1
+  %v14 = load i8, i8* %v2, align 1
+  %v15 = zext i8 %v14 to i32
+  %v16 = add i32 %v15, 2
+  %v17 = add i32 %v16, 0
+  %v18 = lshr i32 %v17, 2
+  %v19 = trunc i32 %v18 to i8
+  store i8 %v19, i8* %v13, align 1
+  %v20 = add i32 %v1, 1
+  %v21 = icmp eq i32 %v20, %a1
+  br i1 %v21, label %b2, label %b1
+
+b2:                                               ; preds = %b1, %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-change-dep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-change-dep.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-change-dep.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-change-dep.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,64 @@
+; RUN: llc -march=hexagon -enable-aa-sched-mi -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  unreachable
+
+b2:                                               ; preds = %b0
+  br i1 undef, label %b3, label %b4
+
+b3:                                               ; preds = %b2
+  unreachable
+
+b4:                                               ; preds = %b2
+  br i1 undef, label %b5, label %b6
+
+b5:                                               ; preds = %b4
+  unreachable
+
+b6:                                               ; preds = %b4
+  br label %b7
+
+b7:                                               ; preds = %b7, %b6
+  br i1 undef, label %b8, label %b7
+
+b8:                                               ; preds = %b7
+  br i1 undef, label %b15, label %b9
+
+b9:                                               ; preds = %b8
+  br label %b10
+
+b10:                                              ; preds = %b10, %b9
+  br i1 undef, label %b11, label %b10
+
+b11:                                              ; preds = %b10
+  br label %b12
+
+b12:                                              ; preds = %b12, %b11
+  br i1 undef, label %b13, label %b12
+
+b13:                                              ; preds = %b13, %b12
+  %v0 = phi i32 [ %v5, %b13 ], [ 0, %b12 ]
+  %v1 = getelementptr inbounds [11 x i32], [11 x i32]* undef, i32 0, i32 %v0
+  %v2 = load i32, i32* %v1, align 4
+  %v3 = add i32 %v2, 1
+  %v4 = lshr i32 %v3, 1
+  store i32 %v4, i32* %v1, align 4
+  store i32 0, i32* %v1, align 4
+  %v5 = add nsw i32 %v0, 1
+  %v6 = icmp eq i32 %v5, 11
+  br i1 %v6, label %b14, label %b13
+
+b14:                                              ; preds = %b13
+  br label %b15
+
+b15:                                              ; preds = %b14, %b8
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-const-tc2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-const-tc2.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-const-tc2.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-const-tc2.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,36 @@
+; RUN: llc -march=hexagon -rdf-opt=0 < %s | FileCheck %s
+
+; Test that we fix up a pipelined loop correctly when the number of
+; stages is greater than the compile-time loop trip count. In this
+; test, there are two prolog stages, but the loop executes only once.
+; With the bug, the final CFG contained two iterations of the loop.
+
+; CHECK-NOT: loop0
+; CHECK: = mpy
+; CHECK-NOT: = mpy
+
+define void @f0() {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i32 [ 0, %b0 ], [ %v9, %b1 ]
+  %v1 = phi i32 [ 0, %b0 ], [ %v8, %b1 ]
+  %v2 = load i32, i32* undef, align 4
+  %v3 = add nsw i32 %v1, 1
+  %v4 = srem i32 %v2, 3
+  %v5 = icmp ne i32 %v4, 0
+  %v6 = sub nsw i32 0, %v2
+  %v7 = select i1 %v5, i32 %v6, i32 %v2
+  %v8 = mul nsw i32 %v3, %v7
+  %v9 = add nsw i32 %v0, 1
+  %v10 = icmp eq i32 %v9, 1
+  br i1 %v10, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  %v11 = phi i32 [ %v8, %b1 ]
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  br label %b3
+}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-const-tc3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-const-tc3.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-const-tc3.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-const-tc3.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,107 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that the pipeliner correctly fixes up the pipelined CFG when the loop
+; has a constant trip count, and the trip count is less than the number of
+; prolog blocks. Prior to the fix, the pipeliner deleted one extra prolog and
+; epilog stage. We check this by counting the number of sxth instructions.
+
+; CHECK: r{{[0-9]+}} = sxth(r{{[0-9]+}})
+; CHECK: r{{[0-9]+}} = sxth(r{{[0-9]+}})
+; CHECK: r{{[0-9]+}} = sxth(r{{[0-9]+}})
+; CHECK: r{{[0-9]+}} = sxth(r{{[0-9]+}})
+
+; Function Attrs: nounwind readonly
+define signext i16 @f0(i16* nocapture readonly %a0, i16* nocapture readnone %a1, i16* nocapture readonly %a2, i16* nocapture readonly %a3, i16 signext %a4, i16 signext %a5, i16 signext %a6) #0 {
+b0:
+  %v0 = icmp sgt i16 %a5, 0
+  br i1 %v0, label %b1, label %b7
+
+b1:                                               ; preds = %b0
+  %v1 = load i16, i16* %a0, align 2
+  %v2 = sext i16 %v1 to i32
+  %v3 = load i16, i16* %a3, align 2
+  %v4 = sext i16 %v3 to i32
+  br label %b2
+
+b2:                                               ; preds = %b6, %b1
+  %v5 = phi i32 [ 2147483647, %b1 ], [ %v44, %b6 ]
+  %v6 = phi i16 [ 0, %b1 ], [ %v45, %b6 ]
+  %v7 = phi i16 [ 0, %b1 ], [ %v43, %b6 ]
+  %v8 = phi i16* [ %a2, %b1 ], [ %v38, %b6 ]
+  %v9 = load i16, i16* %v8, align 2
+  %v10 = sext i16 %v9 to i32
+  %v11 = tail call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %v2, i32 %v10)
+  %v12 = shl i32 %v11, 16
+  %v13 = ashr exact i32 %v12, 16
+  %v14 = tail call i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32 %v13, i32 %v4)
+  %v15 = tail call i32 @llvm.hexagon.M2.hmmpyl.s1(i32 %v14, i32 %v13)
+  %v16 = tail call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v15, i32 10)
+  %v17 = getelementptr inbounds i16, i16* %v8, i32 1
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v18 = phi i16* [ %v8, %b2 ], [ %v19, %b3 ]
+  %v19 = phi i16* [ %v17, %b2 ], [ %v38, %b3 ]
+  %v20 = phi i32 [ %v16, %b2 ], [ %v36, %b3 ]
+  %v21 = phi i32 [ 1, %b2 ], [ %v37, %b3 ]
+  %v22 = getelementptr inbounds i16, i16* %a0, i32 %v21
+  %v23 = load i16, i16* %v22, align 2
+  %v24 = sext i16 %v23 to i32
+  %v25 = load i16, i16* %v19, align 2
+  %v26 = sext i16 %v25 to i32
+  %v27 = tail call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %v24, i32 %v26)
+  %v28 = shl i32 %v27, 16
+  %v29 = ashr exact i32 %v28, 16
+  %v30 = getelementptr inbounds i16, i16* %a3, i32 %v21
+  %v31 = load i16, i16* %v30, align 2
+  %v32 = sext i16 %v31 to i32
+  %v33 = tail call i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32 %v29, i32 %v32)
+  %v34 = tail call i32 @llvm.hexagon.M2.hmmpyl.s1(i32 %v33, i32 %v29)
+  %v35 = tail call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v34, i32 10)
+  %v36 = tail call i32 @llvm.hexagon.A2.addsat(i32 %v20, i32 %v35)
+  %v37 = add i32 %v21, 1
+  %v38 = getelementptr inbounds i16, i16* %v18, i32 2
+  %v39 = icmp eq i32 %v37, 3
+  br i1 %v39, label %b4, label %b3
+
+b4:                                               ; preds = %b3
+  %v40 = tail call i32 @llvm.hexagon.A2.subsat(i32 %v36, i32 %v5)
+  %v41 = icmp slt i32 %v40, 0
+  br i1 %v41, label %b5, label %b6
+
+b5:                                               ; preds = %b4
+  %v42 = tail call i32 @llvm.hexagon.A2.addsat(i32 %v36, i32 0)
+  br label %b6
+
+b6:                                               ; preds = %b5, %b4
+  %v43 = phi i16 [ %v6, %b5 ], [ %v7, %b4 ]
+  %v44 = phi i32 [ %v42, %b5 ], [ %v5, %b4 ]
+  %v45 = add i16 %v6, 1
+  %v46 = icmp eq i16 %v45, %a5
+  br i1 %v46, label %b7, label %b2
+
+b7:                                               ; preds = %b6, %b0
+  %v47 = phi i16 [ 0, %b0 ], [ %v43, %b6 ]
+  ret i16 %v47
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M2.hmmpyl.s1(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.addsat(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.subsat(i32, i32) #1
+
+attributes #0 = { nounwind readonly "target-cpu"="hexagonv60" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-cse-phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-cse-phi.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-cse-phi.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-cse-phi.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,52 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; This test checks that we don't assert when the Phi value from the
+; loop is actually defined prior to the loop, e.g., from CSE.
+
+define fastcc void @f0() {
+b0:
+  br i1 undef, label %b10, label %b1
+
+b1:                                               ; preds = %b0
+  br i1 undef, label %b3, label %b2
+
+b2:                                               ; preds = %b1
+  br label %b8
+
+b3:                                               ; preds = %b1
+  br i1 undef, label %b4, label %b6
+
+b4:                                               ; preds = %b3
+  %v0 = load i16, i16* undef, align 2
+  br label %b7
+
+b5:                                               ; preds = %b7
+  br label %b6
+
+b6:                                               ; preds = %b5, %b3
+  %v1 = phi i16 [ %v9, %b5 ], [ 0, %b3 ]
+  br i1 undef, label %b10, label %b9
+
+b7:                                               ; preds = %b7, %b4
+  %v2 = phi i16 [ 0, %b7 ], [ %v0, %b4 ]
+  %v3 = phi i16 [ %v9, %b7 ], [ 0, %b4 ]
+  %v4 = phi i32 [ %v10, %b7 ], [ undef, %b4 ]
+  %v5 = or i16 0, %v3
+  %v6 = or i16 0, %v5
+  %v7 = or i16 0, %v6
+  %v8 = lshr i16 %v2, 8
+  %v9 = or i16 %v8, %v7
+  %v10 = add nsw i32 %v4, -32
+  %v11 = icmp sgt i32 %v10, 31
+  br i1 %v11, label %b7, label %b5
+
+b8:                                               ; preds = %b8, %b2
+  br label %b8
+
+b9:                                               ; preds = %b6
+  br label %b10
+
+b10:                                              ; preds = %b9, %b6, %b0
+  ret void
+}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-dag-phi1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-dag-phi1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-dag-phi1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-dag-phi1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,35 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; This test checks that a dependence is created between a Phi and its uses.
+; An assert occurs if the Phi dependences are not correct.
+
+define void @f0(float* nocapture %a0, i32 %a1) #0 {
+b0:
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi float [ %v1, %b1 ], [ undef, %b0 ]
+  %v1 = phi float [ %v13, %b1 ], [ undef, %b0 ]
+  %v2 = phi float* [ null, %b1 ], [ %a0, %b0 ]
+  %v3 = phi i32 [ %v14, %b1 ], [ 0, %b0 ]
+  %v4 = phi float [ %v5, %b1 ], [ undef, %b0 ]
+  %v5 = load float, float* %v2, align 4
+  %v6 = fmul float %v1, 0x3FFFA98000000000
+  %v7 = fmul float %v0, 0xBFEF550000000000
+  %v8 = fadd float %v6, %v7
+  %v9 = fmul float %v5, 0x3FEFAA0000000000
+  %v10 = fadd float %v8, %v9
+  %v11 = fmul float %v4, 0xBFFFAA0000000000
+  %v12 = fadd float %v11, %v10
+  %v13 = fadd float undef, %v12
+  store float %v13, float* %v2, align 4
+  %v14 = add nsw i32 %v3, 1
+  %v15 = icmp eq i32 %v14, %a1
+  br i1 %v15, label %b2, label %b1
+
+b2:                                               ; preds = %b1, %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-dead-regseq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-dead-regseq.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-dead-regseq.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-dead-regseq.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,41 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; Check that a dead REG_SEQUENCE doesn't ICE.
+
+; Function Attrs: nounwind
+define void @f0(i32* nocapture %a0, i32 %a1) #0 {
+b0:
+  %v0 = mul nsw i32 %a1, 4
+  %v1 = icmp sgt i32 %v0, 0
+  br i1 %v1, label %b1, label %b2
+
+b1:                                               ; preds = %b1, %b0
+  %v2 = phi i32 [ %v11, %b1 ], [ 0, %b0 ]
+  %v3 = load i32, i32* null, align 4
+  %v4 = zext i32 %v3 to i64
+  %v5 = getelementptr inbounds i32, i32* %a0, i32 0
+  %v6 = load i32, i32* %v5, align 4
+  %v7 = zext i32 %v6 to i64
+  %v8 = shl nuw i64 %v7, 32
+  %v9 = or i64 %v8, %v4
+  %v10 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 0, i64 %v9, i64 %v9)
+  %v11 = add nsw i32 %v2, 4
+  %v12 = icmp slt i32 %v11, %v0
+  br i1 %v12, label %b1, label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v13 = phi i64 [ 0, %b0 ], [ %v10, %b1 ]
+  %v14 = tail call i64 @llvm.hexagon.S2.asr.r.vw(i64 %v13, i32 6)
+  store i64 %v14, i64* null, align 8
+  unreachable
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M2.vdmacs.s0(i64, i64, i64) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.asr.r.vw(i64, i32) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-disable-Os.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-disable-Os.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-disable-Os.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-disable-Os.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,130 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: loop0(.LBB0_{{[0-9]+}},#347)
+
+target triple = "hexagon"
+
+; Function Attrs: norecurse nounwind optsize readonly
+define i32 @f0(i32 %a0, i8* nocapture readonly %a1, i32 %a2) local_unnamed_addr #0 {
+b0:
+  %v0 = lshr i32 %a0, 16
+  %v1 = and i32 %a0, 65535
+  %v2 = icmp ugt i32 %a2, 5551
+  br i1 %v2, label %b1, label %b4
+
+b1:                                               ; preds = %b0, %b3
+  %v3 = phi i32 [ %v96, %b3 ], [ %v0, %b0 ]
+  %v4 = phi i32 [ %v7, %b3 ], [ %a2, %b0 ]
+  %v5 = phi i8* [ %v94, %b3 ], [ %a1, %b0 ]
+  %v6 = phi i32 [ %v95, %b3 ], [ %v1, %b0 ]
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v8 = phi i32 [ %v6, %b1 ], [ %v89, %b2 ]
+  %v9 = phi i8* [ %v5, %b1 ], [ %v91, %b2 ]
+  %v10 = phi i32 [ %v3, %b1 ], [ %v90, %b2 ]
+  %v11 = phi i32 [ 347, %b1 ], [ %v92, %b2 ]
+  %v12 = load i8, i8* %v9, align 1, !tbaa !0
+  %v13 = zext i8 %v12 to i32
+  %v14 = add i32 %v8, %v13
+  %v15 = add i32 %v14, %v10
+  %v16 = getelementptr inbounds i8, i8* %v9, i32 1
+  %v17 = load i8, i8* %v16, align 1, !tbaa !0
+  %v18 = zext i8 %v17 to i32
+  %v19 = add i32 %v14, %v18
+  %v20 = add i32 %v15, %v19
+  %v21 = getelementptr inbounds i8, i8* %v9, i32 2
+  %v22 = load i8, i8* %v21, align 1, !tbaa !0
+  %v23 = zext i8 %v22 to i32
+  %v24 = add i32 %v19, %v23
+  %v25 = add i32 %v20, %v24
+  %v26 = getelementptr inbounds i8, i8* %v9, i32 3
+  %v27 = load i8, i8* %v26, align 1, !tbaa !0
+  %v28 = zext i8 %v27 to i32
+  %v29 = add i32 %v24, %v28
+  %v30 = add i32 %v25, %v29
+  %v31 = getelementptr inbounds i8, i8* %v9, i32 4
+  %v32 = load i8, i8* %v31, align 1, !tbaa !0
+  %v33 = zext i8 %v32 to i32
+  %v34 = add i32 %v29, %v33
+  %v35 = add i32 %v30, %v34
+  %v36 = getelementptr inbounds i8, i8* %v9, i32 5
+  %v37 = load i8, i8* %v36, align 1, !tbaa !0
+  %v38 = zext i8 %v37 to i32
+  %v39 = add i32 %v34, %v38
+  %v40 = add i32 %v35, %v39
+  %v41 = getelementptr inbounds i8, i8* %v9, i32 6
+  %v42 = load i8, i8* %v41, align 1, !tbaa !0
+  %v43 = zext i8 %v42 to i32
+  %v44 = add i32 %v39, %v43
+  %v45 = add i32 %v40, %v44
+  %v46 = getelementptr inbounds i8, i8* %v9, i32 7
+  %v47 = load i8, i8* %v46, align 1, !tbaa !0
+  %v48 = zext i8 %v47 to i32
+  %v49 = add i32 %v44, %v48
+  %v50 = add i32 %v45, %v49
+  %v51 = getelementptr inbounds i8, i8* %v9, i32 8
+  %v52 = load i8, i8* %v51, align 1, !tbaa !0
+  %v53 = zext i8 %v52 to i32
+  %v54 = add i32 %v49, %v53
+  %v55 = add i32 %v50, %v54
+  %v56 = getelementptr inbounds i8, i8* %v9, i32 9
+  %v57 = load i8, i8* %v56, align 1, !tbaa !0
+  %v58 = zext i8 %v57 to i32
+  %v59 = add i32 %v54, %v58
+  %v60 = add i32 %v55, %v59
+  %v61 = getelementptr inbounds i8, i8* %v9, i32 10
+  %v62 = load i8, i8* %v61, align 1, !tbaa !0
+  %v63 = zext i8 %v62 to i32
+  %v64 = add i32 %v59, %v63
+  %v65 = add i32 %v60, %v64
+  %v66 = getelementptr inbounds i8, i8* %v9, i32 11
+  %v67 = load i8, i8* %v66, align 1, !tbaa !0
+  %v68 = zext i8 %v67 to i32
+  %v69 = add i32 %v64, %v68
+  %v70 = add i32 %v65, %v69
+  %v71 = getelementptr inbounds i8, i8* %v9, i32 12
+  %v72 = load i8, i8* %v71, align 1, !tbaa !0
+  %v73 = zext i8 %v72 to i32
+  %v74 = add i32 %v69, %v73
+  %v75 = add i32 %v70, %v74
+  %v76 = getelementptr inbounds i8, i8* %v9, i32 13
+  %v77 = load i8, i8* %v76, align 1, !tbaa !0
+  %v78 = zext i8 %v77 to i32
+  %v79 = add i32 %v74, %v78
+  %v80 = add i32 %v75, %v79
+  %v81 = getelementptr inbounds i8, i8* %v9, i32 14
+  %v82 = load i8, i8* %v81, align 1, !tbaa !0
+  %v83 = zext i8 %v82 to i32
+  %v84 = add i32 %v79, %v83
+  %v85 = add i32 %v80, %v84
+  %v86 = getelementptr inbounds i8, i8* %v9, i32 15
+  %v87 = load i8, i8* %v86, align 1, !tbaa !0
+  %v88 = zext i8 %v87 to i32
+  %v89 = add i32 %v84, %v88
+  %v90 = add i32 %v85, %v89
+  %v91 = getelementptr inbounds i8, i8* %v9, i32 16
+  %v92 = add nsw i32 %v11, -1
+  %v93 = icmp eq i32 %v92, 0
+  br i1 %v93, label %b3, label %b2
+
+b3:                                               ; preds = %b2
+  %v7 = add i32 %v4, -5552
+  %v94 = getelementptr i8, i8* %v5, i32 5552
+  %v95 = urem i32 %v89, 65521
+  %v96 = urem i32 %v90, 65521
+  %v97 = icmp ugt i32 %v7, 5551
+  br i1 %v97, label %b1, label %b4
+
+b4:                                               ; preds = %b3, %b0
+  %v98 = phi i32 [ %v0, %b0 ], [ %v96, %b3 ]
+  %v99 = phi i32 [ %v1, %b0 ], [ %v95, %b3 ]
+  %v100 = shl nuw i32 %v98, 16
+  %v101 = or i32 %v100, %v99
+  ret i32 %v101
+}
+
+attributes #0 = { norecurse nounwind optsize readonly "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi4.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi4.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi4.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,41 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s | FileCheck %s
+
+; Test that we generate the correct value for a Phi in the epilog
+; that is for a value defined two stages earlier. An extra copy in the
+; epilog means the schedule is incorrect.
+
+; CHECK: endloop0
+; CHECK-NOT: r{{[0-9]+}} = r{{[0-9]+}}
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0, i32* %a1, [1000 x i32]* %a2) #0 {
+b0:
+  br i1 undef, label %b1, label %b3
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i32 [ %v8, %b1 ], [ 1, %b0 ]
+  %v1 = load i32, i32* null, align 4, !tbaa !0
+  %v2 = getelementptr inbounds i32, i32* %a1, i32 %v0
+  %v3 = load i32, i32* %v2, align 4, !tbaa !0
+  %v4 = load i32, i32* undef, align 4, !tbaa !0
+  %v5 = mul nsw i32 %v4, %v3
+  %v6 = add nsw i32 %v5, %v1
+  %v7 = getelementptr inbounds [1000 x i32], [1000 x i32]* %a2, i32 %v0, i32 0
+  store i32 %v6, i32* %v7, align 4, !tbaa !0
+  %v8 = add nsw i32 %v0, 1
+  %v9 = icmp eq i32 %v8, %a0
+  br i1 %v9, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  unreachable
+
+b3:                                               ; preds = %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"long", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi5.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi5.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi5.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,184 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that we use the correct name in an epilog phi for a phi value
+; that is defined for the last time in the kernel. Previously, we
+; used the value from the kernel loop definition, but we really need
+; to use the value from the Phi in the kernel instead.
+
+; In this test case, the second loop, block b5, is pipelined.
+
+; CHECK: loop0
+; CHECK: [[REG0:r([0-9]+)]] += mpyi
+; CHECK-NOT: r{{[0-9]+}} += add([[REG0]],#8)
+; CHECK: endloop1
+
+%s.0 = type { %s.1*, %s.4*, %s.7*, i8*, i8, i32, %s.8*, i32, i32, i32, i8, i8, i32, i32, double, i8, i8, i8, i8, i8, i8, i8, i8, i32, i8, i8, i8, i32, i32, i32, i32, i32, i32, i8**, i32, i32, i32, i32, i32, [64 x i32]*, [4 x %s.9*], [4 x %s.10*], [4 x %s.10*], i32, %s.23*, i8, i8, [16 x i8], [16 x i8], [16 x i8], i32, i8, i8, i8, i8, i16, i16, i8, i8, i8, %s.11*, i32, i32, i32, i32, i8*, i32, [4 x %s.23*], i32, i32, i32, [10 x i32], i32, i32, i32, i32, i32, %s.12*, %s.13*, %s.14*, %s.15*, %s.16*, %s.17*, %s.18*, %s.19*, %s.20*, %s.21*, %s.22* }
+%s.1 = type { void (%s.2*)*, void (%s.2*, i32)*, void (%s.2*)*, void (%s.2*, i8*)*, void (%s.2*)*, i32, %s.3, i32, i32, i8**, i32, i8**, i32, i32 }
+%s.2 = type { %s.1*, %s.4*, %s.7*, i8*, i8, i32 }
+%s.3 = type { [8 x i32], [48 x i8] }
+%s.4 = type { i8* (%s.2*, i32, i32)*, i8* (%s.2*, i32, i32)*, i8** (%s.2*, i32, i32, i32)*, [64 x i16]** (%s.2*, i32, i32, i32)*, %s.5* (%s.2*, i32, i8, i32, i32, i32)*, %s.6* (%s.2*, i32, i8, i32, i32, i32)*, {}*, i8** (%s.2*, %s.5*, i32, i32, i8)*, [64 x i16]** (%s.2*, %s.6*, i32, i32, i8)*, void (%s.2*, i32)*, {}*, i32, i32 }
+%s.5 = type opaque
+%s.6 = type opaque
+%s.7 = type { {}*, i32, i32, i32, i32 }
+%s.8 = type { i8*, i32, {}*, i8 (%s.0*)*, void (%s.0*, i32)*, i8 (%s.0*, i32)*, {}* }
+%s.9 = type { [64 x i16], i8 }
+%s.10 = type { [17 x i8], [256 x i8], i8 }
+%s.11 = type { %s.11*, i8, i32, i32, i8* }
+%s.12 = type { {}*, {}*, i8 }
+%s.13 = type { void (%s.0*, i8)*, void (%s.0*, i8**, i32*, i32)* }
+%s.14 = type { {}*, i32 (%s.0*)*, {}*, i32 (%s.0*, i8***)*, %s.6** }
+%s.15 = type { void (%s.0*, i8)*, void (%s.0*, i8***, i32*, i32, i8**, i32*, i32)* }
+%s.16 = type { i32 (%s.0*)*, {}*, {}*, {}*, i8, i8 }
+%s.17 = type { {}*, i32 (%s.0*)*, i8 (%s.0*)*, i8, i8, i32, i32 }
+%s.18 = type { {}*, i8 (%s.0*, [64 x i16]**)*, i8 }
+%s.19 = type { {}*, [5 x void (%s.0*, %s.23*, i16*, i8**, i32)*] }
+%s.20 = type { {}*, void (%s.0*, i8***, i32*, i32, i8**, i32*, i32)*, i8 }
+%s.21 = type { {}*, void (%s.0*, i8***, i32, i8**, i32)* }
+%s.22 = type { void (%s.0*, i8)*, void (%s.0*, i8**, i8**, i32)*, {}*, {}* }
+%s.23 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i32, i32, i32, i32, i32, i32, %s.9*, i8* }
+
+; Function Attrs: nounwind optsize
+define hidden void @f0(%s.0* nocapture readonly %a0, %s.23* nocapture readonly %a1, i8** nocapture readonly %a2, i8*** nocapture readonly %a3) #0 {
+b0:
+  %v0 = load i8**, i8*** %a3, align 4
+  %v1 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 62
+  %v2 = load i32, i32* %v1, align 4
+  %v3 = icmp sgt i32 %v2, 0
+  br i1 %v3, label %b1, label %b10
+
+b1:                                               ; preds = %b0
+  %v4 = getelementptr inbounds %s.23, %s.23* %a1, i32 0, i32 10
+  br label %b2
+
+b2:                                               ; preds = %b8, %b1
+  %v5 = phi i32 [ 0, %b1 ], [ %v98, %b8 ]
+  %v6 = phi i32 [ 0, %b1 ], [ %v99, %b8 ]
+  %v7 = getelementptr inbounds i8*, i8** %a2, i32 %v6
+  br label %b3
+
+b3:                                               ; preds = %b7, %b2
+  %v8 = phi i32 [ 0, %b2 ], [ %v96, %b7 ]
+  %v9 = phi i32 [ %v5, %b2 ], [ %v16, %b7 ]
+  %v10 = load i8*, i8** %v7, align 4
+  %v11 = icmp eq i32 %v8, 0
+  %v12 = select i1 %v11, i32 -1, i32 1
+  %v13 = add i32 %v12, %v6
+  %v14 = getelementptr inbounds i8*, i8** %a2, i32 %v13
+  %v15 = load i8*, i8** %v14, align 4
+  %v16 = add nsw i32 %v9, 1
+  %v17 = getelementptr inbounds i8*, i8** %v0, i32 %v9
+  %v18 = load i8*, i8** %v17, align 4
+  %v19 = getelementptr inbounds i8, i8* %v10, i32 1
+  %v20 = load i8, i8* %v10, align 1
+  %v21 = zext i8 %v20 to i32
+  %v22 = mul nsw i32 %v21, 3
+  %v23 = getelementptr inbounds i8, i8* %v15, i32 1
+  %v24 = load i8, i8* %v15, align 1
+  %v25 = zext i8 %v24 to i32
+  %v26 = add nsw i32 %v22, %v25
+  %v27 = load i8, i8* %v19, align 1
+  %v28 = zext i8 %v27 to i32
+  %v29 = mul nsw i32 %v28, 3
+  %v30 = load i8, i8* %v23, align 1
+  %v31 = zext i8 %v30 to i32
+  %v32 = add nsw i32 %v29, %v31
+  %v33 = mul nsw i32 %v26, 4
+  %v34 = add nsw i32 %v33, 8
+  %v35 = lshr i32 %v34, 4
+  %v36 = trunc i32 %v35 to i8
+  %v37 = getelementptr inbounds i8, i8* %v18, i32 1
+  store i8 %v36, i8* %v18, align 1
+  %v38 = mul nsw i32 %v26, 3
+  %v39 = add i32 %v38, 7
+  %v40 = add i32 %v39, %v32
+  %v41 = lshr i32 %v40, 4
+  %v42 = trunc i32 %v41 to i8
+  store i8 %v42, i8* %v37, align 1
+  %v43 = load i32, i32* %v4, align 4
+  %v44 = add i32 %v43, -2
+  %v45 = getelementptr inbounds i8, i8* %v18, i32 2
+  %v46 = icmp eq i32 %v44, 0
+  br i1 %v46, label %b7, label %b4
+
+b4:                                               ; preds = %b3
+  %v47 = getelementptr inbounds i8, i8* %v15, i32 2
+  %v48 = getelementptr inbounds i8, i8* %v10, i32 2
+  %v49 = mul i32 %v43, 2
+  br label %b5
+
+b5:                                               ; preds = %b5, %b4
+  %v50 = phi i8* [ %v45, %b4 ], [ %v76, %b5 ]
+  %v51 = phi i32 [ %v44, %b4 ], [ %v75, %b5 ]
+  %v52 = phi i32 [ %v26, %b4 ], [ %v53, %b5 ]
+  %v53 = phi i32 [ %v32, %b4 ], [ %v64, %b5 ]
+  %v54 = phi i8* [ %v18, %b4 ], [ %v50, %b5 ]
+  %v55 = phi i8* [ %v47, %b4 ], [ %v61, %b5 ]
+  %v56 = phi i8* [ %v48, %b4 ], [ %v57, %b5 ]
+  %v57 = getelementptr inbounds i8, i8* %v56, i32 1
+  %v58 = load i8, i8* %v56, align 1
+  %v59 = zext i8 %v58 to i32
+  %v60 = mul nsw i32 %v59, 3
+  %v61 = getelementptr inbounds i8, i8* %v55, i32 1
+  %v62 = load i8, i8* %v55, align 1
+  %v63 = zext i8 %v62 to i32
+  %v64 = add nsw i32 %v60, %v63
+  %v65 = mul nsw i32 %v53, 3
+  %v66 = add i32 %v52, 8
+  %v67 = add i32 %v66, %v65
+  %v68 = lshr i32 %v67, 4
+  %v69 = trunc i32 %v68 to i8
+  %v70 = getelementptr inbounds i8, i8* %v54, i32 3
+  store i8 %v69, i8* %v50, align 1
+  %v71 = add i32 %v65, 7
+  %v72 = add i32 %v71, %v64
+  %v73 = lshr i32 %v72, 4
+  %v74 = trunc i32 %v73 to i8
+  store i8 %v74, i8* %v70, align 1
+  %v75 = add i32 %v51, -1
+  %v76 = getelementptr inbounds i8, i8* %v50, i32 2
+  %v77 = icmp eq i32 %v75, 0
+  br i1 %v77, label %b6, label %b5
+
+b6:                                               ; preds = %b5
+  %v78 = add i32 %v49, -2
+  %v79 = getelementptr i8, i8* %v18, i32 %v78
+  %v80 = add i32 %v49, -4
+  %v81 = getelementptr i8, i8* %v18, i32 %v80
+  br label %b7
+
+b7:                                               ; preds = %b6, %b3
+  %v82 = phi i8* [ %v79, %b6 ], [ %v45, %b3 ]
+  %v83 = phi i32 [ %v53, %b6 ], [ %v26, %b3 ]
+  %v84 = phi i32 [ %v64, %b6 ], [ %v32, %b3 ]
+  %v85 = phi i8* [ %v81, %b6 ], [ %v18, %b3 ]
+  %v86 = mul nsw i32 %v84, 3
+  %v87 = add i32 %v83, 8
+  %v88 = add i32 %v87, %v86
+  %v89 = lshr i32 %v88, 4
+  %v90 = trunc i32 %v89 to i8
+  %v91 = getelementptr inbounds i8, i8* %v85, i32 3
+  store i8 %v90, i8* %v82, align 1
+  %v92 = mul nsw i32 %v84, 4
+  %v93 = add nsw i32 %v92, 7
+  %v94 = lshr i32 %v93, 4
+  %v95 = trunc i32 %v94 to i8
+  store i8 %v95, i8* %v91, align 1
+  %v96 = add nsw i32 %v8, 1
+  %v97 = icmp eq i32 %v96, 2
+  br i1 %v97, label %b8, label %b3
+
+b8:                                               ; preds = %b7
+  %v98 = add i32 %v5, 2
+  %v99 = add nsw i32 %v6, 1
+  %v100 = load i32, i32* %v1, align 4
+  %v101 = icmp slt i32 %v98, %v100
+  br i1 %v101, label %b2, label %b9
+
+b9:                                               ; preds = %b8
+  br label %b10
+
+b10:                                              ; preds = %b9, %b0
+  ret void
+}
+
+attributes #0 = { nounwind optsize "target-cpu"="hexagonv60" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi6.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi6.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi6.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi6.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,66 @@
+; RUN: llc -march=hexagon -O2 -debug-only=pipeliner < %s -o - 2>&1 > /dev/null | FileCheck %s
+
+; Test that the phi in the first epilog block gets the correct
+; value from the kernel block. In this bug, the phi was using the value
+; defined in the loop instead of the Phi value defined in the kernel.
+; We need to use the kernel's phi value (if the Phi in the kernel is the
+; last definition).
+
+; CHECK: New block
+; CHECK: %[[REG:([0-9]+)]]:intregs = PHI %{{.*}}, %[[REG1:([0-9]+)]]
+; CHECK: %[[REG1]]:intregs = A2_addi
+; CHECK: epilog:
+; CHECK: %{{[0-9]+}}:intregs = PHI %{{.*}}, %[[REG]]
+
+define void @f0(i32 %a0, i32 %a1) #0 {
+b0:
+  %v0 = icmp sgt i32 %a0, 64
+  br i1 %v0, label %b1, label %b3
+
+b1:                                               ; preds = %b0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v1 = phi i32 [ %a0, %b1 ], [ %v13, %b2 ]
+  %v2 = phi <16 x i32>* [ null, %b1 ], [ %v3, %b2 ]
+  %v3 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 1
+  %v4 = load <16 x i32>, <16 x i32>* %v2, align 64
+  %v5 = load <16 x i32>, <16 x i32>* undef, align 64
+  %v6 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v5, <16 x i32> undef, i32 1)
+  %v7 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> undef, <16 x i32> %v6)
+  %v8 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> undef, <32 x i32> %v7, i32 undef)
+  %v9 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v8, <32 x i32> undef, i32 undef)
+  %v10 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v9, <16 x i32> zeroinitializer, i32 undef)
+  %v11 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v10)
+  %v12 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> undef, <16 x i32> %v11, i32 %a1)
+  store <16 x i32> %v12, <16 x i32>* null, align 64
+  %v13 = add nsw i32 %v1, -64
+  %v14 = icmp sgt i32 %v13, 64
+  br i1 %v14, label %b2, label %b3
+
+b3:                                               ; preds = %b2, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32>, <32 x i32>, i32) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32>, <32 x i32>, i32) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32>, <16 x i32>, i32) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32>, <16 x i32>, i32) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #0
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi8.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi8.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi8.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,44 @@
+; RUN: llc -march=hexagon -mno-pairing -mno-compound < %s | FileCheck %s
+
+; Test that we generate the correct phi names in the epilog when the pipeliner
+; schedules a phi and its loop definition in different stages (e.g., a phi is
+; scheduled in stage 2, but the loop definition is scheduled in stage 0). The
+; code in generateExistingPhis was generating the wrong name for the last
+; epilog block.
+
+; CHECK: endloop0
+; CHECK: sub([[REG:r([0-9]+)]],r{{[0-9]+}}):sat
+; CHECK-NOT: sub([[REG]],r{{[0-9]+}}):sat
+
+define void @f0() {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  br i1 undef, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v0 = phi i32 [ %v8, %b3 ], [ 7, %b2 ]
+  %v1 = phi i32 [ %v6, %b3 ], [ undef, %b2 ]
+  %v2 = phi i32 [ %v1, %b3 ], [ undef, %b2 ]
+  %v3 = getelementptr inbounds [9 x i32], [9 x i32]* undef, i32 0, i32 %v0
+  %v4 = add nsw i32 %v0, -2
+  %v5 = getelementptr inbounds [9 x i32], [9 x i32]* undef, i32 0, i32 %v4
+  %v6 = load i32, i32* %v5, align 4
+  %v7 = tail call i32 @llvm.hexagon.A2.subsat(i32 %v2, i32 %v6)
+  store i32 %v7, i32* %v3, align 4
+  %v8 = add i32 %v0, -1
+  %v9 = icmp sgt i32 %v8, 1
+  br i1 %v9, label %b3, label %b4
+
+b4:                                               ; preds = %b3
+  unreachable
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.subsat(i32, i32) #0
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phis.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phis.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phis.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phis.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,54 @@
+; RUN: llc -march=hexagon -enable-pipeliner -pipeliner-max-stages=2 \
+; RUN:     -pipeliner-ignore-recmii -disable-hexagon-nv-schedule -stats -o /dev/null\
+; RUN:     -enable-aa-sched-mi < %s 2>&1 | FileCheck %s --check-prefix=STATS
+; REQUIRES: asserts
+;
+; Test that we generate the correct phis in the last epilog block when
+; allowing multiple stages.
+;
+; STATS: 1 pipeliner        - Number of loops software pipelined
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b6, label %b1
+
+b1:                                               ; preds = %b0
+  br i1 undef, label %b6, label %b2
+
+b2:                                               ; preds = %b1
+  br label %b4
+
+b3:                                               ; preds = %b4, %b3
+  %v0 = add nsw i32 0, 57344
+  %v1 = trunc i32 %v0 to i16
+  store i16 %v1, i16* null, align 2, !tbaa !0
+  %v2 = getelementptr inbounds i8, i8* null, i32 undef
+  %v3 = load i8, i8* %v2, align 1, !tbaa !4
+  %v4 = zext i8 %v3 to i32
+  %v5 = shl nuw nsw i32 %v4, 6
+  %v6 = add nsw i32 %v5, 57344
+  %v7 = trunc i32 %v6 to i16
+  store i16 %v7, i16* undef, align 2, !tbaa !0
+  br i1 undef, label %b5, label %b3
+
+b4:                                               ; preds = %b5, %b2
+  %v8 = phi i32 [ 0, %b2 ], [ %v9, %b5 ]
+  br label %b3
+
+b5:                                               ; preds = %b3
+  %v9 = add i32 %v8, 1
+  %v10 = icmp eq i32 %v9, undef
+  br i1 %v10, label %b6, label %b4
+
+b6:                                               ; preds = %b5, %b1, %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!2, !2, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-epilog-reuse2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-epilog-reuse2.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-epilog-reuse2.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-epilog-reuse2.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,72 @@
+; RUN: llc -fp-contract=fast -O3 -march=hexagon < %s
+; REQUIRES: asserts
+
+; Test that the pipeliner doesn't ICE because the PHI generation
+; code in the epilog does not attempt to reuse an existing PHI.
+; Similar test case to swp-epilog-reuse.ll but with a couple of
+; differences.
+
+; Function Attrs: nounwind
+define void @f0(float* noalias %a0, float* noalias %a1) #0 {
+b0:
+  %v0 = getelementptr inbounds float, float* %a1, i32 2
+  br i1 undef, label %b1, label %b6
+
+b1:                                               ; preds = %b5, %b0
+  %v1 = phi float* [ undef, %b5 ], [ %v0, %b0 ]
+  %v2 = phi float* [ %v32, %b5 ], [ undef, %b0 ]
+  %v3 = getelementptr inbounds float, float* %a0, i32 undef
+  %v4 = getelementptr inbounds float, float* %v1, i32 1
+  br i1 undef, label %b2, label %b5
+
+b2:                                               ; preds = %b1
+  %v5 = getelementptr float, float* %v3, i32 1
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v6 = phi float* [ %v5, %b2 ], [ %v20, %b3 ]
+  %v7 = phi float [ %v19, %b3 ], [ undef, %b2 ]
+  %v8 = phi float [ %v7, %b3 ], [ undef, %b2 ]
+  %v9 = phi float* [ %v15, %b3 ], [ %v4, %b2 ]
+  %v10 = bitcast float* %v6 to i8*
+  %v11 = fadd float undef, 0.000000e+00
+  %v12 = fadd float undef, %v11
+  %v13 = fadd float %v7, %v12
+  %v14 = fmul float %v13, 3.906250e-03
+  %v15 = getelementptr inbounds float, float* %v9, i32 1
+  store float %v14, float* %v9, align 4, !tbaa !0
+  %v16 = getelementptr i8, i8* %v10, i32 undef
+  %v17 = bitcast i8* %v16 to float*
+  %v18 = load float, float* %v17, align 4, !tbaa !0
+  %v19 = fadd float %v18, undef
+  %v20 = getelementptr float, float* %v6, i32 2
+  %v21 = icmp ult float* %v15, %v2
+  br i1 %v21, label %b3, label %b4
+
+b4:                                               ; preds = %b3
+  %v22 = getelementptr float, float* %v4, i32 undef
+  br label %b5
+
+b5:                                               ; preds = %b4, %b1
+  %v23 = phi float* [ %v4, %b1 ], [ %v22, %b4 ]
+  %v24 = phi float [ undef, %b1 ], [ %v8, %b4 ]
+  %v25 = fadd float %v24, undef
+  %v26 = fadd float %v25, undef
+  %v27 = fadd float undef, %v26
+  %v28 = fadd float undef, %v27
+  %v29 = fpext float %v28 to double
+  %v30 = fmul double %v29, 0x3F7111112119E8FB
+  %v31 = fptrunc double %v30 to float
+  store float %v31, float* %v23, align 4, !tbaa !0
+  %v32 = getelementptr inbounds float, float* %v2, i32 undef
+  br i1 undef, label %b1, label %b6
+
+b6:                                               ; preds = %b5, %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-epilog-reuse3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-epilog-reuse3.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-epilog-reuse3.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-epilog-reuse3.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,90 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; Test that the pipeliner doesn't ICE due to incorrect PHI
+; generation code that attempts to reuse an existing PHI.
+; Similar to the other swp-epilog-reuse test, but from a
+; different test case.
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.asl.r.r.sat(i32, i32) #0
+
+; Function Attrs: nounwind
+define void @f0() #1 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  br i1 undef, label %b2, label %b1
+
+b2:                                               ; preds = %b2, %b1
+  br i1 undef, label %b3, label %b2
+
+b3:                                               ; preds = %b3, %b2
+  %v0 = phi i16 [ %v10, %b3 ], [ undef, %b2 ]
+  %v1 = phi i16 [ %v0, %b3 ], [ undef, %b2 ]
+  %v2 = phi i32 [ %v26, %b3 ], [ undef, %b2 ]
+  %v3 = phi i32* [ undef, %b3 ], [ undef, %b2 ]
+  %v4 = phi i16* [ %v5, %b3 ], [ undef, %b2 ]
+  %v5 = getelementptr inbounds i16, i16* %v4, i32 1
+  %v6 = sext i16 %v1 to i32
+  %v7 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 0, i32 %v6, i32 undef)
+  %v8 = getelementptr inbounds i16, i16* %v4, i32 2
+  %v9 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v7, i32 undef, i32 undef)
+  %v10 = load i16, i16* %v8, align 2, !tbaa !0
+  %v11 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v9, i32 undef, i32 undef)
+  %v12 = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v11, i32 undef)
+  %v13 = getelementptr [166 x i32], [166 x i32]* null, i32 0, i32 undef
+  %v14 = load i32, i32* %v13, align 4
+  %v15 = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %v14, i32 undef)
+  %v16 = call i64 @llvm.hexagon.S2.asr.i.p(i64 %v15, i32 15)
+  %v17 = call i32 @llvm.hexagon.A2.sat(i64 %v16)
+  %v18 = call i32 @llvm.hexagon.A2.subsat(i32 %v12, i32 %v17)
+  %v19 = getelementptr [166 x i32], [166 x i32]* null, i32 0, i32 undef
+  %v20 = load i32, i32* %v19, align 4
+  %v21 = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %v20, i32 0)
+  %v22 = call i64 @llvm.hexagon.S2.asr.i.p(i64 %v21, i32 15)
+  %v23 = call i32 @llvm.hexagon.A2.sat(i64 %v22)
+  %v24 = call i32 @llvm.hexagon.A2.subsat(i32 %v18, i32 %v23)
+  %v25 = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %v24, i32 undef)
+  store i32 %v25, i32* %v3, align 4, !tbaa !4
+  %v26 = add i32 %v2, 1
+  %v27 = icmp eq i32 %v26, 164
+  br i1 %v27, label %b4, label %b3
+
+b4:                                               ; preds = %b3
+  call void @llvm.trap()
+  unreachable
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32, i32, i32) #0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) #0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.sat(i64) #0
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.asr.i.p(i64, i32) #0
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M2.dpmpyss.s0(i32, i32) #0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.subsat(i32, i32) #0
+
+; Function Attrs: noreturn nounwind
+declare void @llvm.trap() #2
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind "target-cpu"="hexagonv55" }
+attributes #2 = { noreturn nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"long", !2, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-epilog-reuse4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-epilog-reuse4.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-epilog-reuse4.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-epilog-reuse4.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,110 @@
+; RUN: llc -march=hexagon -enable-pipeliner -hexagon-expand-condsets=0 < %s
+; REQUIRES: asserts
+
+; Disable expand-condsets because it will assert on undefined registers.
+
+; Another test that the pipeliner doesn't ICE when reusing a
+; PHI in the epilog code.
+
+ at g0 = external global [18 x i16], align 8
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.asl.r.r.sat(i32, i32) #0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.sxth(i32) #0
+
+; Function Attrs: nounwind
+define void @f0() #1 {
+b0:
+  %v0 = alloca [166 x i32], align 8
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v1 = icmp eq i16 undef, 0
+  br i1 %v1, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  br i1 undef, label %b3, label %b4
+
+b3:                                               ; preds = %b3, %b2
+  %v2 = add i32 0, 2
+  br i1 undef, label %b3, label %b4
+
+b4:                                               ; preds = %b3, %b2
+  %v3 = phi i32* [ undef, %b2 ], [ undef, %b3 ]
+  %v4 = phi i32 [ 0, %b2 ], [ %v2, %b3 ]
+  %v5 = getelementptr [18 x i16], [18 x i16]* @g0, i32 0, i32 undef
+  br label %b5
+
+b5:                                               ; preds = %b5, %b4
+  %v6 = phi i16 [ 0, %b4 ], [ %v17, %b5 ]
+  %v7 = phi i16 [ undef, %b4 ], [ %v6, %b5 ]
+  %v8 = phi i32 [ %v4, %b4 ], [ %v35, %b5 ]
+  %v9 = phi i32* [ %v3, %b4 ], [ undef, %b5 ]
+  %v10 = phi i16* [ undef, %b4 ], [ %v12, %b5 ]
+  %v11 = add i32 %v8, 0
+  %v12 = getelementptr inbounds i16, i16* %v10, i32 1
+  %v13 = sext i16 %v7 to i32
+  %v14 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 0, i32 %v13, i32 undef)
+  %v15 = getelementptr inbounds i16, i16* %v10, i32 2
+  %v16 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v14, i32 undef, i32 undef)
+  %v17 = load i16, i16* %v15, align 2, !tbaa !0
+  %v18 = sext i16 %v17 to i32
+  %v19 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v16, i32 %v18, i32 undef)
+  %v20 = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v19, i32 undef)
+  %v21 = getelementptr [166 x i32], [166 x i32]* %v0, i32 0, i32 %v11
+  %v22 = load i32, i32* %v21, align 4
+  %v23 = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %v22, i32 undef)
+  %v24 = call i64 @llvm.hexagon.S2.asr.i.p(i64 %v23, i32 15)
+  %v25 = call i32 @llvm.hexagon.A2.sat(i64 %v24)
+  %v26 = call i32 @llvm.hexagon.A2.subsat(i32 %v20, i32 %v25)
+  %v27 = load i16, i16* %v5, align 4
+  %v28 = sext i16 %v27 to i32
+  %v29 = call i32 @llvm.hexagon.A2.sxth(i32 %v28)
+  %v30 = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 undef, i32 %v29)
+  %v31 = call i64 @llvm.hexagon.S2.asr.i.p(i64 %v30, i32 15)
+  %v32 = call i32 @llvm.hexagon.A2.sat(i64 %v31)
+  %v33 = call i32 @llvm.hexagon.A2.subsat(i32 %v26, i32 %v32)
+  %v34 = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %v33, i32 undef)
+  store i32 %v34, i32* %v9, align 4, !tbaa !4
+  %v35 = add i32 %v8, 1
+  %v36 = icmp eq i32 %v35, 164
+  br i1 %v36, label %b6, label %b5
+
+b6:                                               ; preds = %b5
+  call void @llvm.trap()
+  unreachable
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32, i32, i32) #0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) #0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.sat(i64) #0
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.asr.i.p(i64, i32) #0
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M2.dpmpyss.s0(i32, i32) #0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.subsat(i32, i32) #0
+
+; Function Attrs: noreturn nounwind
+declare void @llvm.trap() #2
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind "target-cpu"="hexagonv55" }
+attributes #2 = { noreturn nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"long", !2, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-exit-fixup.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-exit-fixup.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-exit-fixup.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-exit-fixup.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,73 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; Make sure we fix up the Phis when we connect the last
+; epilog block to the CFG.
+
+define void @f0(i16* nocapture %a0) #0 {
+b0:
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  br label %b3
+
+b2:                                               ; preds = %b0
+  unreachable
+
+b3:                                               ; preds = %b3, %b1
+  br i1 undef, label %b4, label %b3
+
+b4:                                               ; preds = %b3
+  br i1 undef, label %b6, label %b5
+
+b5:                                               ; preds = %b4
+  store i16 4096, i16* %a0, align 2
+  br label %b11
+
+b6:                                               ; preds = %b4
+  br i1 undef, label %b7, label %b8
+
+b7:                                               ; preds = %b7, %b6
+  br label %b7
+
+b8:                                               ; preds = %b8, %b6
+  br i1 undef, label %b9, label %b8
+
+b9:                                               ; preds = %b8
+  %v0 = icmp sgt i32 undef, 1
+  br i1 %v0, label %b10, label %b11
+
+b10:                                              ; preds = %b10, %b9
+  %v1 = phi i32 [ %v8, %b10 ], [ 1, %b9 ]
+  %v2 = getelementptr inbounds [11 x i32], [11 x i32]* undef, i32 0, i32 %v1
+  %v3 = load i32, i32* undef, align 4
+  %v4 = add nsw i32 %v3, 0
+  %v5 = add nsw i32 %v4, 2048
+  %v6 = lshr i32 %v5, 12
+  %v7 = trunc i32 %v6 to i16
+  store i16 %v7, i16* undef, align 2
+  %v8 = add nsw i32 %v1, 1
+  %v9 = icmp eq i32 %v8, undef
+  br i1 %v9, label %b11, label %b10
+
+b11:                                              ; preds = %b10, %b9, %b5
+  %v10 = phi i1 [ false, %b9 ], [ false, %b5 ], [ %v0, %b10 ]
+  br i1 undef, label %b16, label %b12
+
+b12:                                              ; preds = %b11
+  br i1 undef, label %b13, label %b16
+
+b13:                                              ; preds = %b12
+  br i1 %v10, label %b14, label %b15
+
+b14:                                              ; preds = %b14, %b13
+  br i1 undef, label %b15, label %b14
+
+b15:                                              ; preds = %b14, %b13
+  br label %b16
+
+b16:                                              ; preds = %b15, %b12, %b11
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-fix-last-use.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-fix-last-use.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-fix-last-use.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-fix-last-use.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,32 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; We need to rename uses that occur after the loop.
+
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b1, label %b5
+
+b1:                                               ; preds = %b0
+  br i1 undef, label %b2, label %b4
+
+b2:                                               ; preds = %b2, %b1
+  %v0 = phi i32 [ %v1, %b2 ], [ 1, %b1 ]
+  store i16 0, i16* undef, align 2
+  store i16 0, i16* undef, align 2
+  %v1 = add nsw i32 %v0, 4
+  %v2 = icmp slt i32 %v1, undef
+  br i1 %v2, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  %v3 = icmp eq i32 %v1, undef
+  br i1 %v3, label %b5, label %b4
+
+b4:                                               ; preds = %b4, %b3, %b1
+  br i1 undef, label %b5, label %b4
+
+b5:                                               ; preds = %b4, %b3, %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-fix-last-use1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-fix-last-use1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-fix-last-use1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-fix-last-use1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,68 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b1, label %b4
+
+b1:                                               ; preds = %b0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v0 = phi i32 [ undef, %b1 ], [ %v13, %b2 ]
+  %v1 = phi i32 [ 0, %b1 ], [ %v20, %b2 ]
+  %v2 = zext i32 %v0 to i64
+  %v3 = or i64 0, %v2
+  %v4 = tail call i64 @llvm.hexagon.S2.lsl.r.vh(i64 %v3, i32 4)
+  %v5 = or i64 %v4, -9223231297218904064
+  %v6 = lshr i64 %v5, 32
+  %v7 = trunc i64 %v6 to i32
+  %v8 = tail call i64 @llvm.hexagon.S2.vzxthw(i32 %v7)
+  %v9 = lshr i64 %v8, 32
+  %v10 = trunc i64 %v9 to i32
+  %v11 = tail call i32 @llvm.hexagon.S2.lsr.r.r(i32 %v10, i32 undef)
+  %v12 = load i64, i64* undef, align 8, !tbaa !0
+  %v13 = trunc i64 %v12 to i32
+  %v14 = lshr i64 %v12, 32
+  %v15 = trunc i64 %v14 to i32
+  %v16 = zext i32 %v11 to i64
+  %v17 = shl nuw i64 %v16, 32
+  %v18 = or i64 %v17, 0
+  %v19 = tail call i32 @llvm.hexagon.S2.vsatwuh(i64 %v18)
+  %v20 = add nsw i32 %v1, 1
+  %v21 = icmp eq i32 %v20, undef
+  br i1 %v21, label %b3, label %b2
+
+b3:                                               ; preds = %b2
+  br label %b4
+
+b4:                                               ; preds = %b3, %b0
+  %v22 = phi i32 [ %v19, %b3 ], [ undef, %b0 ]
+  %v23 = phi i32 [ %v15, %b3 ], [ undef, %b0 ]
+  %v24 = zext i32 %v22 to i64
+  %v25 = shl nuw i64 %v24, 32
+  %v26 = or i64 %v25, 0
+  store i64 %v26, i64* undef, align 8, !tbaa !0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.lsl.r.vh(i64, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.vzxthw(i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.lsr.r.r(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.vsatwuh(i64) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"long long", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-intreglow8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-intreglow8.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-intreglow8.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-intreglow8.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,91 @@
+; RUN: llc -march=hexagon -verify-machineinstrs < %s
+; REQUIRES: asserts
+
+; Test that we constrain the new register operands of an instruction
+; to the register class of the original instruction's operands.
+; In this case, the register class of a valign scalar operand changed
+; from IntRegsLow8 to IntRegs, which is incorrect.
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b1, label %b6
+
+b1:                                               ; preds = %b0
+  br label %b2
+
+b2:                                               ; preds = %b4, %b1
+  %v0 = phi <16 x i32> [ undef, %b1 ], [ %v17, %b4 ]
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v1 = phi i32 [ 0, %b2 ], [ %v19, %b3 ]
+  %v2 = phi i32 [ undef, %b2 ], [ %v18, %b3 ]
+  %v3 = phi <16 x i32> [ %v0, %b2 ], [ %v17, %b3 ]
+  %v4 = tail call i32 @llvm.hexagon.A2.combine.ll(i32 0, i32 0)
+  %v5 = tail call <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32> undef, <16 x i32> undef, i32 %v2)
+  %v6 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffuh(<16 x i32> %v5, <16 x i32> undef)
+  %v7 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v6, <16 x i32> %v6)
+  %v8 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v7)
+  %v9 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %v8, i32 17)
+  %v10 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v9, i32 151587081)
+  %v11 = tail call <16 x i32> @llvm.hexagon.V6.vsatwh(<16 x i32> %v10, <16 x i32> undef)
+  %v12 = tail call <16 x i32> @llvm.hexagon.V6.vsubuhsat(<16 x i32> undef, <16 x i32> %v11)
+  %v13 = tail call <16 x i32> @llvm.hexagon.V6.vmaxh(<16 x i32> %v12, <16 x i32> undef)
+  %v14 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh(<16 x i32> %v13, i32 %v4)
+  %v15 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh(<16 x i32> undef, i32 %v4)
+  %v16 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v14, <16 x i32> %v15)
+  %v17 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v3, <16 x i32> %v16)
+  %v18 = add nsw i32 %v2, -2
+  %v19 = add nsw i32 %v1, 1
+  %v20 = icmp eq i32 %v19, 2
+  br i1 %v20, label %b4, label %b3
+
+b4:                                               ; preds = %b3
+  br i1 undef, label %b5, label %b2
+
+b5:                                               ; preds = %b4
+  unreachable
+
+b6:                                               ; preds = %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.combine.ll(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vabsdiffuh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsatwh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsubuhsat(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vmaxh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vmpyiwh(<16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-kernel-last-use.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-kernel-last-use.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-kernel-last-use.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-kernel-last-use.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,74 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; This test caused an assert because there was a use of an instruction
+; that was scheduled at stage 0, but no phis were added in the epilog.
+
+%s.0 = type <{ i8*, i8*, i16, i8, i8, i8 }>
+%s.1 = type { [4 x i16], [4 x i16], [4 x i16], [4 x i16], i32, i32, i32, i8, [10 x i32], [10 x [3 x i32]], [4 x i64], i8 }
+%s.2 = type { [3 x i16], [4 x i8], i32, [3 x %s.3], [3 x %s.3], [3 x %s.3], [3 x %s.3], [3 x %s.3], [3 x %s.3], [6 x %s.3], [6 x %s.3], [6 x %s.3], i8, [3 x [3 x i16]], [3 x [3 x i16]], [3 x i16], [3 x i16], [6 x i16], [2 x i32], [10 x i32], [2 x i32], [2 x i32], [2 x [3 x i32]], [2 x i32], [2 x [3 x i64]], [2 x [3 x [3 x i32]]], [2 x [3 x i32]] }
+%s.3 = type { i8, i8, i8, i8 }
+
+ at g0 = external constant %s.0, align 1
+
+define void @f0(i8 zeroext %a0, i32 %a1, i32 %a2, i8 zeroext %a3, %s.1* nocapture %a4, %s.2* %a5, i8 zeroext %a6) #0 {
+b0:
+  br i1 undef, label %b1, label %b7
+
+b1:                                               ; preds = %b0
+  br i1 undef, label %b2, label %b3
+
+b2:                                               ; preds = %b1
+  unreachable
+
+b3:                                               ; preds = %b1
+  %v0 = select i1 undef, i32 2, i32 4
+  %v1 = load i8, i8* undef, align 1
+  %v2 = zext i8 %v1 to i32
+  %v3 = icmp uge i32 %v2, %v0
+  br label %b4
+
+b4:                                               ; preds = %b4, %b3
+  br i1 undef, label %b4, label %b8
+
+b5:                                               ; preds = %b10
+  unreachable
+
+b6:                                               ; preds = %b10
+  call void @f1(%s.0* @g0, i32 undef, i32 %v21, i32 undef, i32 undef)
+  unreachable
+
+b7:                                               ; preds = %b0
+  ret void
+
+b8:                                               ; preds = %b8, %b4
+  %v4 = phi i32 [ %v11, %b8 ], [ undef, %b4 ]
+  %v5 = phi i32 [ %v12, %b8 ], [ 0, %b4 ]
+  %v6 = xor i1 false, %v3
+  %v7 = zext i1 %v6 to i32
+  %v8 = shl nuw nsw i32 %v7, 1
+  %v9 = shl i32 %v4, 2
+  %v10 = or i32 0, %v9
+  %v11 = or i32 %v10, %v8
+  %v12 = add i32 %v5, 1
+  %v13 = icmp ult i32 %v12, %v0
+  br i1 %v13, label %b8, label %b9
+
+b9:                                               ; preds = %b9, %b8
+  %v14 = phi i32 [ %v21, %b9 ], [ %v11, %b8 ]
+  %v15 = icmp ne i32 undef, 1
+  %v16 = xor i1 %v15, %v3
+  %v17 = zext i1 %v16 to i32
+  %v18 = shl nuw nsw i32 %v17, 1
+  %v19 = shl i32 %v14, 2
+  %v20 = or i32 0, %v19
+  %v21 = or i32 %v20, %v18
+  br i1 undef, label %b9, label %b10
+
+b10:                                              ; preds = %b9
+  br i1 undef, label %b6, label %b5
+}
+
+declare void @f1(%s.0*, i32, i32, i32, i32)
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-kernel-phi1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-kernel-phi1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-kernel-phi1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-kernel-phi1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,46 @@
+; RUN: llc -march=hexagon -enable-pipeliner-opt-size < %s | FileCheck %s
+
+; Test that we generate the correct names for the phis in the kernel for the
+; incoming values. In this case, the loop contains a phi that has another phi
+; as its loop definition, and the two phis are scheduled in different stages.
+;
+;    vreg5 = phi(x, vreg4) is scheduled in stage 1, cycle 0
+;    vreg4 = phi(y, z) is scheduled in stage 0, cycle 0
+
+; CHECK-DAG: :[[REG0:[0-9]+]]{{.*}} = {{.*}},#17
+; CHECK-DAG: loop0(.LBB0_[[LOOP:.]],
+; CHECK: .LBB0_[[LOOP]]:
+; CHECK: r{{[0-9]+}} = sxth(r[[REG0]])
+; CHECK: endloop0
+
+; Function Attrs: nounwind optsize
+define void @f0() #0 {
+b0:
+  %v0 = getelementptr [8 x i16], [8 x i16]* undef, i32 0, i32 7
+  %v1 = bitcast i16* %v0 to [8 x i16]*
+  br label %b2
+
+b1:                                               ; preds = %b2
+  unreachable
+
+b2:                                               ; preds = %b2, %b0
+  %v2 = phi i32 [ 7, %b0 ], [ %v11, %b2 ]
+  %v3 = phi i16 [ 17, %b0 ], [ %v7, %b2 ]
+  %v4 = phi i16 [ 18, %b0 ], [ %v3, %b2 ]
+  %v5 = sext i16 %v4 to i32
+  %v6 = getelementptr i16, i16* null, i32 -2
+  %v7 = load i16, i16* %v6, align 2
+  %v8 = sext i16 %v7 to i32
+  %v9 = tail call i32 @llvm.hexagon.A2.subsat(i32 %v5, i32 %v8)
+  %v10 = trunc i32 %v9 to i16
+  store i16 %v10, i16* null, align 2
+  %v11 = add nsw i32 %v2, -1
+  %v12 = icmp sgt i32 %v11, 1
+  br i1 %v12, label %b2, label %b1
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.subsat(i32, i32) #1
+
+attributes #0 = { nounwind optsize "target-cpu"="hexagonv60" }
+attributes #1 = { nounwind readnone }
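
The scheduling situation described in the test above, where a phi's loop definition is itself another phi, amounts to a value that lags the loop by two iterations. A minimal stand-alone sketch of that IR pattern is shown below; the names, initial values, and computation are illustrative and are not taken from the test.

define i16 @sketch(i16* %p, i32 %n) #0 {
b0:
  br label %b1

b1:                                               ; preds = %b1, %b0
  %i = phi i32 [ 0, %b0 ], [ %i.next, %b1 ]
  ; %cur holds last iteration's %new; %prev holds the value from two
  ; iterations ago, because its loop definition is the phi %cur.
  %cur = phi i16 [ 17, %b0 ], [ %new, %b1 ]
  %prev = phi i16 [ 18, %b0 ], [ %cur, %b1 ]
  %addr = getelementptr inbounds i16, i16* %p, i32 %i
  %new = load i16, i16* %addr, align 2
  %sum = add i16 %prev, %new
  store i16 %sum, i16* %addr, align 2
  %i.next = add nsw i32 %i, 1
  %done = icmp eq i32 %i.next, %n
  br i1 %done, label %b2, label %b1

b2:                                               ; preds = %b1
  ret i16 %prev
}

attributes #0 = { nounwind "target-cpu"="hexagonv60" }

If the two phis are placed in different pipeline stages, the kernel has to take the incoming value from the phi (%cur here) rather than from the original loop definition (%new); the CHECK lines above appear to pin this down by requiring the kernel's sxth to read the register that was initialized to #17.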

Added: llvm/trunk/test/CodeGen/Hexagon/swp-large-rec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-large-rec.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-large-rec.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-large-rec.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,66 @@
+; RUN: llc -march=hexagon -enable-pipeliner -stats \
+; RUN:     -pipeliner-prune-loop-carried=false -fp-contract=fast \
+; RUN:     -o /dev/null < %s 2>&1 | FileCheck %s --check-prefix=STATS
+
+; Test that we do not pipeline this loop. The recurrence is too large. If
+; we pipeline this example, it means we're not checking the complete
+; chain of dependences.
+
+; STATS-NOT: 1 pipeliner   - Number of loops software pipelined
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0, i32 %a1, double %a2, double %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, [1000 x i8]* %a9, [1000 x i8]* %a10, [1000 x i8]* %a11) #0 {
+b0:
+  br i1 undef, label %b1, label %b4
+
+b1:                                               ; preds = %b3, %b0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v0 = phi i8* [ %v22, %b2 ], [ %a5, %b1 ]
+  %v1 = phi i8* [ %v23, %b2 ], [ %a6, %b1 ]
+  %v2 = phi i8* [ %v24, %b2 ], [ %a7, %b1 ]
+  %v3 = phi i32 [ %v20, %b2 ], [ 0, %b1 ]
+  %v4 = load i8, i8* %v0, align 1, !tbaa !0
+  %v5 = zext i8 %v4 to i32
+  %v6 = load i8, i8* %v1, align 1, !tbaa !0
+  %v7 = sext i8 %v6 to i32
+  %v8 = load i8, i8* %v2, align 1, !tbaa !0
+  %v9 = sext i8 %v8 to i32
+  %v10 = mul nsw i32 %v9, %v7
+  %v11 = add nsw i32 %v10, %v5
+  %v12 = trunc i32 %v11 to i8
+  store i8 %v12, i8* undef, align 1, !tbaa !0
+  %v13 = load i8, i8* %v2, align 1, !tbaa !0
+  %v14 = sext i8 %v13 to i32
+  %v15 = load i8, i8* undef, align 1, !tbaa !0
+  %v16 = sext i8 %v15 to i32
+  %v17 = mul nsw i32 %v16, %v14
+  %v18 = add i32 %v17, %v11
+  %v19 = trunc i32 %v18 to i8
+  store i8 %v19, i8* %v0, align 1, !tbaa !0
+  %v20 = add nsw i32 %v3, 1
+  store i8 0, i8* undef, align 1, !tbaa !0
+  %v21 = icmp eq i32 %v20, undef
+  %v22 = getelementptr i8, i8* %v0, i32 1
+  %v23 = getelementptr i8, i8* %v1, i32 1
+  %v24 = getelementptr i8, i8* %v2, i32 1
+  br i1 %v21, label %b3, label %b2
+
+b3:                                               ; preds = %b2
+  tail call void @f1(i32 %a1, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, [1000 x i8]* %a9, [1000 x i8]* %a10, [1000 x i8]* %a11, i8 signext 1) #2
+  br i1 undef, label %b4, label %b1
+
+b4:                                               ; preds = %b3, %b0
+  ret void
+}
+
+declare void @f1(i32, i8*, i8*, i8*, i8*, i8*, [1000 x i8]*, [1000 x i8]*, [1000 x i8]*, i8 signext) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { "target-cpu"="hexagonv55" }
+attributes #2 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-loop-carried-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-loop-carried-crash.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-loop-carried-crash.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-loop-carried-crash.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,344 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Test that the pipeliner doesn't assert in the addLoopCarriedDependence
+; function with the message "What happened to the chain edge?". The bug is that
+; the pass wasn't checking for a sequence of chain edges from the load to the
+; store. The code assumed a single chain edge only.
+
+%0 = type { %1*, [2 x [2 x %39]], [2 x [2 x %39]], [2 x i8], [2 x i8], [2 x i8], [2 x i8], i32, i32 }
+%1 = type { %0, %2, %3, %15, %16*, %98*, %99, %105*, %295*, %299, %303, %304, %304, %307, i8, i8, i32 }
+%2 = type <{ %1*, i8, [3 x i8] }>
+%3 = type { %1*, i8, i32, i8, %4*, %8, %307, %12*, [10 x i8*], [10 x i8], %307 }
+%4 = type { %5*, %6 }
+%5 = type opaque
+%6 = type { %7 }
+%7 = type { i64 }
+%8 = type { %9, %4*, [16 x i32], void (%8*, i8*, i32)*, i8*, %307, %307 }
+%9 = type { [16 x %11], i16, i8, %10*, %11 }
+%10 = type { i64, [8 x i8] }
+%11 = type { %307 }
+%12 = type { %10, %13, %13, i32, i32, i32, void (%10*)*, void (%10*)*, i32 (%10*)*, void (%10*)*, i32, i64* }
+%13 = type { %14 }
+%14 = type { i16, i16, i32, i32, i32 }
+%15 = type <{ %1*, i8, [3 x i8] }>
+%16 = type { %1*, i32, i32, i8, i16, i16, i8, %17, i32, %22, %27, [4 x i8], [6 x [512 x %28]], %94, [6 x %29], [6 x i8*], %94, [7 x %95], [7 x i8*], [7 x i8*], %96*, %97, [8 x i8] }
+%17 = type { %18*, %21, %21, i32, i8 }
+%18 = type { %19, %19, %20 }
+%19 = type { i32, i16, i16 }
+%20 = type { i32, i32, i32 }
+%21 = type { i32, i32, i32, i32 }
+%22 = type { %23*, %24 }
+%23 = type { i8, %10 }
+%24 = type { %25 }
+%25 = type { %26 }
+%26 = type { i32 }
+%27 = type { i32, i32, i32, i8* }
+%28 = type { i16, i16, i16, i16 }
+%29 = type <{ i8*, i8*, i32, i16, [2 x i8], %24, %28*, i32, i8, [3 x i8], i32, %30, i8, i8, [2 x i8] }>
+%30 = type { %31, %44 }
+%31 = type { %32* }
+%32 = type { %33*, %24, i16, i16, i16, %37*, i16, i16, i8, i8, i32 }
+%33 = type { %34, [5 x %35], %36 }
+%34 = type { i32, i8 }
+%35 = type { [2 x i32] }
+%36 = type { i32, i8 }
+%37 = type <{ %38, i16, i16, i8, [3 x i8], %42*, %43*, i64*, [4 x i8], i64, i16, i8, i8, i16, i16, i32, i8, [3 x i8] }>
+%38 = type { %39*, i8, %40, i8, %41 }
+%39 = type { i64 }
+%40 = type { i32, i32, %24, %24, i32, i32, i16, i16, i16, i8, i8, i8, i8, i16 }
+%41 = type { i8, i16*, i32*, i32, i8, i8* }
+%42 = type { i16, i16, i16 }
+%43 = type { i64, [280 x i8] }
+%44 = type { %45* }
+%45 = type { %38, %39*, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %46, i8, i8, i8, %48, i8, i8, i16, i16, i8, i8, i8, %50*, %67*, i16*, i16*, i16, i16, i8, i16*, i8, i8, i8, i8, %69*, %87*, %91*, %92*, %92*, %93*, i8, i8, i8, i8, i8, %40*, i8, i32, i8, i8, i32, i32, i32, i32, %17, i32 }
+%46 = type { %47 }
+%47 = type { i8 }
+%48 = type { %49 }
+%49 = type { i16 }
+%50 = type { i16, i16, %51, %53, i16, i16, i16, i16, [39 x i16], [3 x i16], [39 x i16], [5 x i16], %54, %57, %60, i8, %63, %66 }
+%51 = type { %52 }
+%52 = type { i16 }
+%53 = type { i16 }
+%54 = type { i32, i32, i32, i32, i32, i8, i8, i8, i8, %55 }
+%55 = type { %56 }
+%56 = type { i8, i8 }
+%57 = type { %58, [2 x %59] }
+%58 = type { i32 }
+%59 = type { i32 }
+%60 = type { i24, i16, [4 x i16], [2 x %61] }
+%61 = type { [4 x %62], i16, i8 }
+%62 = type { i16, i8, i32 }
+%63 = type { %64, i16, [3 x %65], [3 x %65] }
+%64 = type { i8 }
+%65 = type { i8, i16, i16 }
+%66 = type { i8, i32, i32, i16, i16 }
+%67 = type { %68, i8, i8 }
+%68 = type { i32, i32 }
+%69 = type { i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, %70, i32, i16, i16, i16, [22 x i16], %71, %72, %82, %85, %87, i8, i16 }
+%70 = type { i16 }
+%71 = type { %20, %21, %21, i32, i32, i32, i8 }
+%72 = type { %73 }
+%73 = type { i16, %74, i16, %75, %76, %77, %78, %79, %80 }
+%74 = type { i8, i8 }
+%75 = type { i16 }
+%76 = type { i16 }
+%77 = type { i16 }
+%78 = type { i16 }
+%79 = type { i16 }
+%80 = type { %81, i16, i16, i16, i16 }
+%81 = type { i16, i16 }
+%82 = type { i16, i16, i32, i32, i32, i32, i16, i16, i16, i16, i16, i16, %83, i16, i16, i16, i16, i16, i32, i32, %84, i32, i32 }
+%83 = type { i16 }
+%84 = type { i32 }
+%85 = type { %86, i32, i32, i32, [5 x i64] }
+%86 = type { i32 }
+%87 = type { %88, [4 x i16], [4 x i16], i16, %89, %90 }
+%88 = type { i32 }
+%89 = type { [4 x i32], [4 x i32], [4 x i16], [4 x i16], [4 x i16], [4 x i16], [4 x i16], [4 x i16] }
+%90 = type { i8, i8, i8, [2 x i32], [2 x i32] }
+%91 = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
+%92 = type { i16, i16, [1024 x i16], i8, i8 }
+%93 = type { i16, i16, [8 x i16] }
+%94 = type { i32, i32, i32, i8** }
+%95 = type { %24, %24, %29*, i8, i32, i32 }
+%96 = type { i32, i32, i32, i8** }
+%97 = type { i8, %94, [10 x %96], [10 x i8*] }
+%98 = type opaque
+%99 = type { %100 }
+%100 = type { %1*, %101 }
+%101 = type { %102, %102 }
+%102 = type { i8, i8, %103 }
+%103 = type { %104 }
+%104 = type { i32, i32 }
+%105 = type <{ i8, [3 x i8], %106, [4 x i8], [512 x i64], %12*, %295*, %1*, %107*, %180*, %181*, %182*, %196, [4008 x i8], %197, %18, [228 x i8], %253, %258, %266, %267, i8, i8, i8, [5 x i8], %268, %278, %279, [4428 x i8] }>
+%106 = type { [16 x i8], i8, i16, %12*, %12*, i8, i32, i8 }
+%107 = type <{ [128 x %108], %109*, i16, [2 x i8], %145*, %150, %153, %105*, %1*, i8, [7 x i8] }>
+%108 = type { i16, i16 }
+%109 = type { [2 x [1024 x i8]], %110, [5 x %43], %125, [2 x %133], [28 x i8], %138, i8, [64 x i64], [2 x %92], %143, [10 x i8], i8, [31 x i8], [32 x i8], %150, [12 x i8], [18 x i8], [14 x i8] }
+%110 = type { %111, %113, [16 x %50], [6 x %115], [3 x %116], [6 x %117], [3 x %118], [3 x %119], [3 x %120], [3 x %121], %93, i8, [3 x %122], [3 x %91], %124 }
+%111 = type { %112*, i16, i16, [8 x %112] }
+%112 = type { i16, i32, i32, i32, i32, i16, i8, i32, i16, i16 }
+%113 = type { %114*, i16, i16, [3 x %114] }
+%114 = type { i16, i16, i16, i16, i16, i32, i32, i16, i16, i16, i16, i16, i16, i32, i8, i32 }
+%115 = type { [5 x %69] }
+%116 = type { i16, i16, i16, [12 x i8], [12 x i32], i8, [12 x i32], [12 x i16] }
+%117 = type { i16, i16, i16, [12 x i8], [12 x i32], i8, [12 x i32], [12 x i16] }
+%118 = type { i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, %72, %82 }
+%119 = type { i16, i16, [2 x i16], i16, i16, [19 x i16], i32 }
+%120 = type { i16, i16, [8 x i32] }
+%121 = type { i16, i16, [10 x i32] }
+%122 = type { i16, i32, [10 x %123] }
+%123 = type { i32, i32, i32, i32, i16, i16, i8, i16, i8, i8, i8 }
+%124 = type { i32, i32, i16, i32, i64, i16, i16, i32, i32 }
+%125 = type { [3 x %126], %128, %130, %132 }
+%126 = type { %127 }
+%127 = type { [7 x %67], %33, [3 x %67] }
+%128 = type { %129 }
+%129 = type { i32 }
+%130 = type { %131 }
+%131 = type { i32 }
+%132 = type { i32 }
+%133 = type { i16, [26 x %134] }
+%134 = type { %135, %136, %137 }
+%135 = type { i32 }
+%136 = type { i16 }
+%137 = type { i16 }
+%138 = type { i16, i16, %139, [24 x i8], %141, i8, i8, i16, [24 x i8] }
+%139 = type { i8, %140, [25 x i8] }
+%140 = type { i16, i16, i16, i16 }
+%141 = type { %142 }
+%142 = type { i16, [2 x i8] }
+%143 = type { %144 }
+%144 = type { i16 }
+%145 = type { %146, %147, %148, %149 }
+%146 = type { i32 }
+%147 = type { i32 }
+%148 = type { i32 }
+%149 = type { i32 }
+%150 = type { %151, %152 }
+%151 = type { i32, i32, i32, i32, i32, i16, i16, i16, i16, i8 }
+%152 = type { i8, i16, i8, i8, [4 x i32], i8, i8, i8, i16, [2 x i16], [2 x i16], [5 x i16], i8 }
+%153 = type <{ i8, [3 x i8], %154*, [2 x %160], i16, [2 x i8], [2 x %160], i16, [2 x i8], [2 x %160], i16, [2 x i8], [4 x %161], i16, i16, [2 x %162], i16, [2 x i8], %162*, [2 x %172], i16, [2 x i8], [24 x %173], i16, [2 x i8], [24 x %176], i16, [2 x i8], [24 x %176], i16, [2 x i8], [2 x %177], i16, [2 x i8], [2 x %174], i16, [2 x i8], [2 x %175], i16, [2 x i8], [24 x %176], i16, [2 x i8], %177, %177, [14 x %45], i16, [2 x i8], [14 x %160], i16, [2 x i8], %178*, [4 x i8], [8 x %37], i16, [4 x %42], [2 x i8], [8 x %32], i16, [4 x i16], [2 x i8], %179, i16, i16, i16, i16, i16, i16, i16, [2 x i8], [3 x i64], i16, i8, i8, i16, [2 x i8] }>
+%154 = type { i16, i16, %155 }
+%155 = type { %156 }
+%156 = type { i16, %157, [2 x %158], i8, i16, i8, [12 x %159] }
+%157 = type { i8, i8, i8 }
+%158 = type { i16, i16 }
+%159 = type { i16, i16, i32, i8, i32, i32, i16, i8, i8, i8, i8, i32 }
+%160 = type { %67*, %24, i16, i16, i16, i8, i32, i8, i8, i8, i8, i32, i32, i32 }
+%161 = type <{ i16, [2 x i8], %24, i32, i32, i16, i16, i8, [3 x i8] }>
+%162 = type <{ %38, i32, i16, i16, i8, i8, i16, %163, i16, %111*, %124*, i8, i8, i8, i8, i16, i8, i8, i8, i8, i8, i8, i16*, i32*, i64*, %164*, i8*, %165*, %168*, %169*, %39*, %170*, %171*, %172*, %23**, i8*, i32, i16, i8, [5 x i8], [164 x %39], %105*, [4 x i8] }>
+%163 = type { i16 }
+%164 = type { [131 x i64], [232 x i8], [131 x i32], %39*, i16, i32, i8, i8, i8, i8, %163, i8, i8, %39*, i8, [219 x i8] }
+%165 = type { %39*, i32, i16, i16, i8, i8, %39, i64, i32, i32, i8, %166, i8, %167 }
+%166 = type { i8, i8 }
+%167 = type { i32, i32, i32, i8 }
+%168 = type { i32, i64, i64, i32, i32, i32, i32, i32, i32, i64, i32, i32, i16, i16, i32, i32, i32, i32, i16, i8, i64, i8, i8, i8 }
+%169 = type { %39*, %39*, %163, i8, i32, i32, i32, i16, i16, i32, i8, i8, i8, i8, i16, i8, %168*, %170*, %111*, i8*, i8*, i32*, i8, %16* }
+%170 = type { %39*, i32, i32, i16, i16, i8, i8, i8 }
+%171 = type { i64, i64, i64, i64, i32, i32, i32, i32, i32, i32, i32, i32, i16, i32, i32, i16, i32, i16, [20 x i16], i16, i16, i8, i8, i8, i8, [78 x i8], [78 x i8], [39 x i8], i8, i32, i32 }
+%172 = type { %38, %39*, i16, i16, i16*, i16*, i16, i16, i8, i8*, %118*, %119*, i16, i32, i32, %114*, %67*, i8, i8, %46, i8, i8, i8, %16*, %105*, %145, i16, i8, i8, i8, i16, i32, i16, %307 }
+%173 = type <{ %38, i16, i16, i16, i8, i8, i16, i16, %116*, i32*, i16*, i8*, i8, i8, [2 x i8], i32*, i8, i8, i8, i8, %105*, i8, [3 x i8] }>
+%174 = type { %24 }
+%175 = type { %24 }
+%176 = type <{ %160, i8, [3 x i8] }>
+%177 = type { i32, %24, i32 }
+%178 = type { [8 x i64] }
+%179 = type { i32, i32, %24, %24, i32, i32 }
+%180 = type opaque
+%181 = type { i16, %12*, [14 x i32], [8 x i32], i32, i32, %105* }
+%182 = type { %183*, %184*, %145*, [2 x [4 x %178]], i8, i8, i8, [10 x %185], %307, %186, i32, i32, i8, i16, i16, i32, i32, %23*, [9 x i8], [16 x %190]*, %194, [2 x %195], %109*, %295* }
+%183 = type { [5 x %39], [24 x i8], [1 x [256 x %39]] }
+%184 = type { [2 x [12 x %92]] }
+%185 = type { i32, i32, i8*, %307 }
+%186 = type { [114 x [22 x i8]], [2 x [22 x i8]], %187, %189 }
+%187 = type { [4 x [4 x [114 x i8]]], [4 x %188], [88 x i8], [4 x [114 x i8]] }
+%188 = type { [4 x [116 x [3 x i8]]] }
+%189 = type { [4 x [8 x i8]], [4 x [8 x [3 x i8]]], [8 x i8] }
+%190 = type { %191, i16, i32, i16, [22 x i8] }
+%191 = type { %192 }
+%192 = type { %193 }
+%193 = type { [1024 x i16], [1024 x i16] }
+%194 = type { i16, i16, i16, %30, %32*, %32*, i16, i8, %24, %39 }
+%195 = type <{ i8, i8, i16, %24, i8, i8, i8, i8, i16, [2 x i8], %32*, i8, i8, [2 x i8] }>
+%196 = type { i32, i8, i8, i8, i8, i8, i8, i8, i8 }
+%197 = type <{ %198, %200, %203, %206, [20 x i8], %207, %236, i16, i16, i8, [3 x i8], [4 x i32], [4 x i32], [8 x [4 x i32]], [8 x i32], [4 x i32], [22 x i32], i8, i8, i8, [2 x i8], [2 x i8], i8, %23*, [44 x i32], [8 x i32], [8 x i32], [2508 x i8], [44 x i8], [456 x i8], [456 x i8], [8 x i8], [8 x i8], i16, i8, i8, i16, i16, i16, [2 x i8], %237, %237, %237, [5 x %238], [5 x %239], %240, i8, i8, [2 x i8], %251, %251, [4 x i8], [8 x [11 x i32]], %307, %150*, %105*, %182*, %196*, %241, [4 x i8], %242, %243, %307, %249, %250, [10 x %251], %251, [4416 x i8], [5 x [5 x [352 x i8]]], [5 x i8], [7579 x i8] }>
+%198 = type { [8 x %199] }
+%199 = type { i16, i8, i8 }
+%200 = type <{ i64, i8, [3 x i8], i32, i32, i32, %201, %201, %201*, i32, i32, i32, i8, i8, i8, [5 x i8], [57 x %39], %202, [7 x i8] }>
+%201 = type { i64, [57 x %39] }
+%202 = type { i8 }
+%203 = type <{ [4 x %204], [4 x %204], %205, %205, i8, [5 x i8] }>
+%204 = type { [4 x [2 x i64]], [12 x i16], i8 }
+%205 = type { i8 }
+%206 = type { i32 }
+%207 = type { [22 x [114 x i8]], [2 x [22 x i8]], [5 x %208], %43, %209, %211, %232, %233, [29 x %210], i32, i32, i32, %234, [4 x i8], i8, i8, i32, [24 x i8] }
+%208 = type { [5 x %39], [164 x %39], [144 x float], [2 x [2 x %39]], i32, [116 x i8], i8, i8 }
+%209 = type { [87 x %39], %210*, i32, i32, i32, [2 x i8], i16, i32, i16, i8, i8 }
+%210 = type { i32 }
+%211 = type { %212, %231 }
+%212 = type { %213 }
+%213 = type { %214, %215, %215, %217, %230, [32 x %39] }
+%214 = type { %39, i16*, i16*, %39*, i8 }
+%215 = type { %216 }
+%216 = type { [2 x [16 x i64]], [2 x [16 x i32]] }
+%217 = type { %218 }
+%218 = type { %219, %222, %223, %225, %228, %229 }
+%219 = type { %220, %221, [30 x %39], float* }
+%220 = type { [150 x %39], [150 x i16] }
+%221 = type { [5 x %39], [5 x i16] }
+%222 = type { [48 x i32], [48 x i32], [32 x %39], [5 x i16], [5 x i32] }
+%223 = type { %39*, i16*, i16, i32, i8, %224* }
+%224 = type { i32, i32, i32, i8 }
+%225 = type { %39*, i8, %226*, i8, i16*, %227*, %214*, i8, i32, i32, i32, i8, i8, i32, %206* }
+%226 = type { i8, i8, i8, i32, i8, i32 }
+%227 = type { %39*, %39*, i32, i8, i8, i8, i8, i8 }
+%228 = type { [87 x %39], [164 x %39], [167 x %39] }
+%229 = type { %39*, i8, i8 }
+%230 = type { [29 x %39] }
+%231 = type { [13 x %39], [13 x %210] }
+%232 = type { %39*, i8, i8, i8, i8, %226*, i8, i8, %224*, i8, i8, %206*, i8, i8 }
+%233 = type { i32, i32, float* }
+%234 = type { %235 }
+%235 = type { i16 }
+%236 = type <{ [57 x %39], [57 x %39], [2 x i8], [2 x i8], [2 x i32], i8, [3 x i8], %268*, [4 x i8] }>
+%237 = type { i32, i32, i32 }
+%238 = type { %39*, %39*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %46, i8, i8, i8, i8, i8, i16, i16, i16, i16, i8, i8, i16, i16, i8, i32, i8, i8, i32, i32, i8, i8, i16, i8*, %253*, %207*, i8*, i32*, i16*, i32*, i32*, i32, %69*, %91*, %69*, %92*, %92*, i8, [2 x i8], i32, i8, i8, i16, i8, i8, i32, i8, i16, %145, i8, %0*, %3*, %182*, %16*, %206*, %249*, i8, %241*, %17, [2 x i8], i8*, i8* }
+%239 = type { i8*, i8*, i32*, %50*, i16, i16, i16, i16, i16, i16, i8, [4 x i8], [2 x i16], %48, i16, i16, i16, i16, [2 x i16], i16, i32, i32, i8, i8, i8, i8, i8, i8, i16, i16, [4 x i8*], i64*, i8, i8, i8, %240*, i8*, i8*, %207*, i8, i8, i8*, i8, i8*, i32 }
+%240 = type { [2 x [16 x %39]], [265 x %39], [1368 x %210] }
+%241 = type { i32, i32, i32, i32, i8, i32, i32, i32, i32, i32 }
+%242 = type <{ %43, [456 x i8], [228 x i8], [456 x i8], [4 x i8] }>
+%243 = type { %244*, i32, i32, i32, i32, i32, i32, i8, i8, %23*, %247, %248 }
+%244 = type { [128 x %245], [2048 x i16] }
+%245 = type { %246, [115 x i64] }
+%246 = type { i16, i8, i32 }
+%247 = type { i32, i32, i32, i32, i8, i8, i8, i8 }
+%248 = type { i32, i32, i32, i32, i8, i8, i32 }
+%249 = type { %23*, i8, i8, [179 x %210], [20 x %210], i16 }
+%250 = type { [8 x i16], [72 x i8], [120 x i8], [3 x i32] }
+%251 = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i16, i16, i8, i8, i8, %48, i8, i32, i8, i32*, [4 x %252], [8 x i32], i8, %238*, %239*, %50*, [8 x i8], %93*, i32, i32, i32, i8, i8, i8, i8, %40*, i8*, i8, i8, i8, i8, i8, [2 x i8], i8, [36 x i8], [36 x i8], i32 }
+%252 = type { i8*, i8 }
+%253 = type { [5 x %254], i8, i8, i8, [114 x i8], %23*, %23*, [48 x i8], %255, %186*, i16, i16, [4 x i16], [4 x i16], i16, %182*, %1*, %105*, [216 x i8] }
+%254 = type { %32*, %37*, i32, i8 }
+%255 = type { [16 x %256], %257, [64 x i8] }
+%256 = type { i64 }
+%257 = type { [8 x i64] }
+%258 = type { i32, %170, [8 x i8], %171, %172, [212 x i8], %164, i8, %165, %168, %169, [100 x %39], %23*, i32, i16, [786 x i64], i16, [2 x i8], [2 x %259], i8, %307, %182*, %105*, %260*, %261, [136 x i8] }
+%259 = type <{ %40, %24, i32, i8, [3 x i8] }>
+%260 = type { %141*, i8, %23*, i32, i32, i32, i32, i32, i32, i32, i32, %295*, %3*, %105*, %2* }
+%261 = type { [2 x %262], %39, %39, %263, [66 x %39], %264, %265, [5 x i16], i32, i8, i16, i16, i32, i16, i16 }
+%262 = type { [170 x %39] }
+%263 = type { [164 x %39] }
+%264 = type { [162 x %210] }
+%265 = type { [312 x %39] }
+%266 = type { %23*, %182*, %260* }
+%267 = type { %23*, %182*, %260* }
+%268 = type { %269, i32, i8, [24 x %277], %23*, i32, i32, %68*, %277*, [24 x i32], i32*, [8 x i16], %96, [6 x i8*], %307, %105*, %23*, %1*, [4 x i8] }
+%269 = type <{ [2 x %270], [2 x %30], [8 x %30], [24 x %30], [2 x %162*], i8, i8, [2 x i8], [2 x %172*], i8, [3 x i8], i32, i32, i32, i16, i16, i32, i8, i8, i16, i16, i8, i8, i8, i8, i8, [8 x i8], i8, [8 x %276], [8 x %69], [8 x %50], i8, i8, i8, i8, i8, i8, i8, i8, i32, i32, i32, [8 x i16], [8 x i16], %93*, %93*, i8, i8, i16, i8, [3 x i8], %24, i8, i8, [2 x i8], %24, %67*, i8, i8, i8, i8, i8, i8, i8, i8, %40*, %91* }>
+%270 = type { %120*, [16 x %30], %30, [8 x %30], [1 x %30], [2 x %158], i8, i8, %30, %30, %271*, i16, %121* }
+%271 = type { i16, %272, i16, [4 x %273], i16, [4 x %275], i32, i32, i32, i32, i8, i8, i32, i32 }
+%272 = type { i16 }
+%273 = type { %274, %43* }
+%274 = type { i16 }
+%275 = type { i16, i16, i32, i32, i8, i16, i16, i8 }
+%276 = type { i8, %40* }
+%277 = type { i8, %30, i8, i8, i8, i32 }
+%278 = type { i16*, %1* }
+%279 = type <{ %280, %280, [16 x %282], %3*, %24, i32, %307, %283, i32, %289, i8, i8, i8, i8, i8, [3 x i8], i32, i32, %24, i8, i8, i8, i8 }>
+%280 = type { %281 }
+%281 = type { %281*, %281* }
+%282 = type <{ %281, %24, %24, %40, i8, [3 x i8] }>
+%283 = type { i32, %284, i32, %286, %287 }
+%284 = type { i8, [5 x %285] }
+%285 = type { i8, i8 }
+%286 = type { i16, i16 }
+%287 = type { i32, %288 }
+%288 = type { i32, i32 }
+%289 = type { %290 }
+%290 = type { %291, i32**, i32, i32, i32 }
+%291 = type { %292, %294 }
+%292 = type { %293 }
+%293 = type { i8 }
+%294 = type { i8 }
+%295 = type { i8, i8, %23*, [16 x %296], i8, %105*, %182*, %1*, %260, i8, i8, i8, i8, i8, i8, %23* }
+%296 = type { i8, i8, %297, i16 }
+%297 = type { i8, %298, i8* }
+%298 = type { i8 }
+%299 = type { %300, %302 }
+%300 = type { %301, i32 }
+%301 = type { i32, i32, i32, i32, i64, i32, i32, i32, i32, i32, i8, [4 x i32], [4 x i32] }
+%302 = type { i32, i32, i32, i32, i32, [4 x i32] }
+%303 = type { i32, i32, i8 }
+%304 = type { %305, i32, i8 }
+%305 = type { %306 }
+%306 = type { i32 }
+%307 = type { i32 }
+
+define void @f0(%0* %a0) align 2 #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i32 [ %v7, %b1 ], [ 0, %b0 ]
+  %v1 = getelementptr inbounds %0, %0* %a0, i32 0, i32 2, i32 undef, i32 %v0
+  %v2 = getelementptr inbounds %39, %39* %v1, i32 0, i32 0
+  %v3 = load i64, i64* %v2, align 8
+  %v4 = call i64 @llvm.hexagon.S2.brevp(i64 %v3) #1
+  store i64 %v4, i64* %v2, align 8
+  %v5 = bitcast %39* %v1 to [2 x i32]*
+  %v6 = getelementptr inbounds [2 x i32], [2 x i32]* %v5, i32 0, i32 1
+  store i32 0, i32* %v6, align 4
+  %v7 = add nuw nsw i32 %v0, 1
+  %v8 = icmp eq i32 %v7, 2
+  br i1 %v8, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.brevp(i64) #0
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-loop-carried.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-loop-carried.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-loop-carried.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-loop-carried.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,46 @@
+; RUN: llc -march=hexagon -fp-contract=fast -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; A Phi that depends on another Phi is loop carried.
+
+define void @f0() #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  br i1 undef, label %b1, label %b2
+
+b2:                                               ; preds = %b1
+  br i1 undef, label %b3, label %b8
+
+b3:                                               ; preds = %b2
+  br i1 undef, label %b4, label %b5
+
+b4:                                               ; preds = %b4, %b3
+  %v0 = phi i32 [ %v5, %b4 ], [ 2, %b3 ]
+  %v1 = phi float [ %v4, %b4 ], [ undef, %b3 ]
+  %v2 = phi float [ %v1, %b4 ], [ undef, %b3 ]
+  %v3 = fsub float 0.000000e+00, %v2
+  %v4 = fadd float %v3, undef
+  %v5 = add nsw i32 %v0, 1
+  %v6 = icmp eq i32 %v5, undef
+  br i1 %v6, label %b5, label %b4
+
+b5:                                               ; preds = %b4, %b3
+  %v7 = phi float [ undef, %b3 ], [ %v1, %b4 ]
+  br i1 false, label %b6, label %b7
+
+b6:                                               ; preds = %b5
+  br label %b7
+
+b7:                                               ; preds = %b6, %b5
+  br label %b9
+
+b8:                                               ; preds = %b2
+  ret void
+
+b9:                                               ; preds = %b9, %b7
+  br label %b9
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
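
For reference, a hypothetical C loop (not part of this patch; names are
illustrative) with the phi-of-a-phi shape the test above exercises: the value
produced in one iteration is consumed two iterations later, so one phi copies
the other and is loop carried.

  /* Minimal sketch, assuming a simple two-deep delay line: `prev` holds
     `cur` from the previous iteration, so after SSA construction `prev`
     becomes a phi whose incoming value is the `cur` phi. */
  float delay2(const float *in, int n) {
    float cur = 0.0f, prev = 0.0f, acc = 0.0f;
    for (int i = 2; i < n; i++) {
      acc  = in[i] - prev;  /* uses the value produced two iterations back */
      prev = cur;           /* phi fed by the other phi                    */
      cur  = acc;           /* phi rewritten every iteration               */
    }
    return acc;
  }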

Added: llvm/trunk/test/CodeGen/Hexagon/swp-loopval.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-loopval.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-loopval.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-loopval.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,57 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; Check that we correctly rename instructions that use a Phi's loop value
+; when the Phi and the loop value are defined after the instruction.
+
+%s.0 = type { [4 x i8], i16, i16, i32, [8 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [4 x %s.1], [4 x i8], i32, i32, [4 x i8], [14 x %s.2] }
+%s.1 = type { i32, i32 }
+%s.2 = type { [4 x i8] }
+
+; Function Attrs: nounwind
+define void @f0(%s.0* nocapture %a0) #0 {
+b0:
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  unreachable
+
+b2:                                               ; preds = %b0
+  br label %b8
+
+b3:                                               ; preds = %b9
+  unreachable
+
+b4:                                               ; preds = %b9
+  br i1 undef, label %b7, label %b5
+
+b5:                                               ; preds = %b4
+  br i1 undef, label %b6, label %b7
+
+b6:                                               ; preds = %b6, %b5
+  %v0 = phi i32 [ %v10, %b6 ], [ 0, %b5 ]
+  %v1 = load i32, i32* undef, align 4
+  %v2 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 29, i32 %v0
+  %v3 = bitcast %s.2* %v2 to i32*
+  %v4 = load i32, i32* %v3, align 4
+  %v5 = and i32 %v1, 65535
+  %v6 = and i32 %v4, -65536
+  %v7 = or i32 %v6, %v5
+  %v8 = and i32 %v7, -2031617
+  %v9 = or i32 %v8, 0
+  store i32 %v9, i32* %v3, align 4
+  %v10 = add nsw i32 %v0, 1
+  %v11 = icmp eq i32 %v10, undef
+  br i1 %v11, label %b7, label %b6
+
+b7:                                               ; preds = %b6, %b5, %b4
+  ret void
+
+b8:                                               ; preds = %b8, %b2
+  br i1 undef, label %b9, label %b8
+
+b9:                                               ; preds = %b8
+  br i1 undef, label %b3, label %b4
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-lots-deps.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-lots-deps.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-lots-deps.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-lots-deps.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,54 @@
+; RUN: llc -march=hexagon -enable-pipeliner -stats -o /dev/null < %s 2>&1 | FileCheck %s --check-prefix=STATS
+; REQUIRES: asserts
+
+; STATS: 1 pipeliner        - Number of loops software pipelined
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0) #0 {
+b0:
+  %v0 = icmp slt i32 %a0, 123469
+  br i1 %v0, label %b1, label %b4
+
+b1:                                               ; preds = %b0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v1 = phi i64 [ undef, %b1 ], [ %v12, %b2 ]
+  %v2 = phi i64 [ undef, %b1 ], [ %v10, %b2 ]
+  %v3 = phi i32 [ 0, %b1 ], [ %v13, %b2 ]
+  %v4 = phi i32 [ undef, %b1 ], [ %v9, %b2 ]
+  %v5 = phi i64 [ undef, %b1 ], [ %v7, %b2 ]
+  %v6 = phi i64 [ undef, %b1 ], [ %v11, %b2 ]
+  %v7 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v5, i64 %v6, i64 %v6)
+  %v8 = tail call i64 @llvm.hexagon.S2.packhl(i32 undef, i32 %v4)
+  %v9 = load i32, i32* undef, align 4, !tbaa !0
+  %v10 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v2, i64 %v6, i64 %v8)
+  %v11 = tail call i64 @llvm.hexagon.S2.packhl(i32 %v9, i32 undef)
+  %v12 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v1, i64 %v6, i64 %v11)
+  %v13 = add nsw i32 %v3, 1
+  %v14 = icmp eq i32 %v13, undef
+  br i1 %v14, label %b3, label %b2
+
+b3:                                               ; preds = %b2
+  %v15 = lshr i64 %v12, 32
+  br label %b4
+
+b4:                                               ; preds = %b3, %b0
+  %v16 = phi i64 [ %v10, %b3 ], [ undef, %b0 ]
+  %v17 = phi i64 [ %v7, %b3 ], [ undef, %b0 ]
+  unreachable
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M2.vdmacs.s0(i64, i64, i64) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.packhl(i32, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-maxstart.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-maxstart.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-maxstart.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-maxstart.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,38 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+
+; Test that the MinStart computation, which is based upon the length
+; of the chain edges, is computed correctly. A bug in the code allowed
+; two instructions connected by a chain edge to be scheduled more than
+; II instructions apart. In this test, if two loads of the same location
+; appear before the store, then that is the bug.
+
+; CHECK: r{{[0-9]+}} = memw([[REG0:r([0-9]+)]]+#12)
+; CHECK-NOT: r{{[0-9]+}} = memw([[REG0]]+#12)
+; CHECK: memw([[REG0]]+#12) = r{{[0-9]+}}
+
+%s.0 = type { i64, i32, i32, i32, i8* }
+
+ at g0 = external global %s.0, align 8
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  %v0 = load i32, i32* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 1), align 8
+  %v1 = ashr i32 %v0, 3
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b1, %b0
+  %v2 = phi i32 [ %v5, %b1 ], [ 0, %b0 ]
+  %v3 = load i8*, i8** getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 4), align 4
+  %v4 = getelementptr inbounds i8, i8* %v3, i32 -1
+  store i8* %v4, i8** getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 4), align 4
+  store i8 0, i8* %v4, align 1
+  %v5 = add nsw i32 %v2, 1
+  %v6 = icmp eq i32 %v5, %v1
+  br i1 %v6, label %b2, label %b1
+
+b2:                                               ; preds = %b1, %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
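
As a rough illustration (hypothetical C; the struct layout and names are made
up), the loop above reloads a pointer field, decrements it, stores it back,
and stores through it; the reload and the store of the same field are joined
by the chain edge that the MinStart computation must respect.

  struct state { long long hdr; int len; int a; int b; char *ptr; };
  extern struct state g;

  void push_zero_bytes(void) {
    for (int i = 0; i < (g.len >> 3); i++) {
      char *p = g.ptr - 1;  /* load of the pointer field           */
      g.ptr = p;            /* store back to the same field        */
      *p = 0;               /* store through the decremented value */
    }
  }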

Added: llvm/trunk/test/CodeGen/Hexagon/swp-more-phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-more-phi.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-more-phi.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-more-phi.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,61 @@
+; RUN: llc -march=hexagon --enable-pipeliner -hexagon-expand-condsets=0 < %s
+; REQUIRES: asserts
+
+; Disable expand-condsets because it will assert on undefined registers.
+
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  unreachable
+
+b2:                                               ; preds = %b0
+  br i1 undef, label %b3, label %b4
+
+b3:                                               ; preds = %b3, %b2
+  br i1 undef, label %b4, label %b3
+
+b4:                                               ; preds = %b3, %b2
+  %v0 = ashr i32 undef, 25
+  %v1 = mul nsw i32 %v0, 2
+  %v2 = load i8, i8* undef, align 1
+  br i1 undef, label %b5, label %b10
+
+b5:                                               ; preds = %b4
+  br i1 undef, label %b6, label %b9
+
+b6:                                               ; preds = %b5
+  br label %b7
+
+b7:                                               ; preds = %b7, %b6
+  br i1 undef, label %b7, label %b8
+
+b8:                                               ; preds = %b7
+  br i1 undef, label %b10, label %b9
+
+b9:                                               ; preds = %b9, %b8, %b5
+  %v3 = phi i8 [ %v7, %b9 ], [ undef, %b8 ], [ %v2, %b5 ]
+  %v4 = phi i32 [ %v8, %b9 ], [ undef, %b8 ], [ 1, %b5 ]
+  %v5 = add i32 %v4, undef
+  %v6 = load i8, i8* undef, align 1
+  %v7 = select i1 undef, i8 %v6, i8 %v3
+  %v8 = add nsw i32 %v4, 1
+  %v9 = icmp eq i32 %v8, %v1
+  br i1 %v9, label %b10, label %b9
+
+b10:                                              ; preds = %b9, %b8, %b4
+  %v10 = phi i8 [ %v2, %b4 ], [ undef, %b8 ], [ %v7, %b9 ]
+  br i1 false, label %b11, label %b12
+
+b11:                                              ; preds = %b10
+  unreachable
+
+b12:                                              ; preds = %b10
+  br label %b13
+
+b13:                                              ; preds = %b13, %b12
+  br label %b13
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-multi-phi-refs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-multi-phi-refs.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-multi-phi-refs.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-multi-phi-refs.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,61 @@
+; RUN: llc -march=hexagon -O3 < %s
+; REQUIRES: asserts
+
+; Test that we generate the correct names for Phis when there is
+; a Phi that references a Phi that references another Phi. For example,
+;  v6 = phi(v1, v9)
+;  v7 = phi(v0, v6)
+;  v8 = phi(v2, v7)
+
+; Function Attrs: nounwind
+define void @f0(i8* noalias nocapture readonly %a0, i32 %a1, i32 %a2, i8* noalias nocapture %a3, i32 %a4) #0 {
+b0:
+  %v0 = add i32 %a1, -1
+  %v1 = getelementptr inbounds i8, i8* %a0, i32 0
+  %v2 = getelementptr inbounds i8, i8* %a0, i32 undef
+  %v3 = getelementptr inbounds i8, i8* %a3, i32 0
+  br i1 undef, label %b1, label %b4
+
+b1:                                               ; preds = %b1, %b0
+  br i1 undef, label %b1, label %b2
+
+b2:                                               ; preds = %b1
+  %v4 = getelementptr inbounds i8, i8* %a0, i32 undef
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v5 = phi i8* [ %v10, %b3 ], [ %v3, %b2 ]
+  %v6 = phi i8* [ %v25, %b3 ], [ %v4, %b2 ]
+  %v7 = phi i8* [ %v6, %b3 ], [ %v2, %b2 ]
+  %v8 = phi i8* [ %v7, %b3 ], [ %v1, %b2 ]
+  %v9 = phi i32 [ %v26, %b3 ], [ 1, %b2 ]
+  %v10 = getelementptr inbounds i8, i8* %v5, i32 %a4
+  %v11 = getelementptr inbounds i8, i8* %v8, i32 -1
+  %v12 = load i8, i8* %v11, align 1, !tbaa !0
+  %v13 = zext i8 %v12 to i32
+  %v14 = add nuw nsw i32 %v13, 0
+  %v15 = add nuw nsw i32 %v14, 0
+  %v16 = add nuw nsw i32 %v15, 0
+  %v17 = load i8, i8* %v6, align 1, !tbaa !0
+  %v18 = zext i8 %v17 to i32
+  %v19 = add nuw nsw i32 %v16, %v18
+  %v20 = add nuw nsw i32 %v19, 0
+  %v21 = mul nsw i32 %v20, 7282
+  %v22 = add nsw i32 %v21, 32768
+  %v23 = lshr i32 %v22, 16
+  %v24 = trunc i32 %v23 to i8
+  store i8 %v24, i8* %v10, align 1, !tbaa !0
+  %v25 = getelementptr inbounds i8, i8* %v6, i32 %a2
+  %v26 = add i32 %v9, 1
+  %v27 = icmp eq i32 %v26, %v0
+  br i1 %v27, label %b4, label %b3
+
+b4:                                               ; preds = %b3, %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}
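
A plausible C shape for the loop above (illustrative only; the real filter
taps and bounds differ): three row pointers shift down each iteration, which
is exactly how a phi ends up referencing a phi that references another phi.

  void smooth_rows(const unsigned char *src, int rows, int stride,
                   unsigned char *dst, int dstride) {
    const unsigned char *r0 = src;               /* row i-1 */
    const unsigned char *r1 = src + stride;      /* row i   */
    const unsigned char *r2 = src + 2 * stride;  /* row i+1 */
    unsigned char *out = dst + dstride;
    for (int i = 1; i < rows - 1; i++) {
      unsigned sum = r0[0] + r1[0] + r2[0];      /* simplified taps */
      out[0] = (unsigned char)((sum * 7282 + 32768) >> 16);
      r0 = r1;                                   /* phi fed by the next phi */
      r1 = r2;                                   /* phi fed by the next phi */
      r2 += stride;
      out += dstride;
    }
  }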

Added: llvm/trunk/test/CodeGen/Hexagon/swp-new-phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-new-phi.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-new-phi.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-new-phi.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,51 @@
+; RUN: llc -march=hexagon -enable-pipeliner -pipeliner-max-stages=2 < %s | FileCheck %s
+
+; Test that the generatePhi code doesn't rename a Phi instruction that's
+; defined in the same block. The bug caused a Phi to incorrectly depend on
+; another Phi.
+
+; CHECK: loop0(.LBB0_[[LOOP:.]],
+; CHECK: .LBB0_[[LOOP]]:
+; CHECK: memh([[REG0:(r[0-9]+)]]++#2:circ
+; CHECK: = mem{{u?}}h([[REG0]]+#0)
+; CHECK: endloop0
+
+; Function Attrs: argmemonly nounwind
+declare i8* @llvm.hexagon.circ.sthhi(i8*, i32, i32, i32) #1
+
+; Function Attrs: nounwind optsize
+define signext i16 @f0(i16* %a0, i16* %a1, i16 signext %a2, i16 signext %a3) #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i16* [ %v10, %b1 ], [ %a1, %b0 ]
+  %v1 = phi i32 [ %v13, %b1 ], [ 1, %b0 ]
+  %v2 = phi i16 [ %v12, %b1 ], [ 0, %b0 ]
+  %v3 = bitcast i16* %v0 to i8*
+  %v4 = add nsw i32 %v1, 10
+  %v5 = getelementptr inbounds i16, i16* %a0, i32 %v4
+  %v6 = load i16, i16* %v5, align 2, !tbaa !0
+  %v7 = sext i16 %v6 to i32
+  %v8 = add nsw i32 %v7, 40000
+  %v9 = tail call i8* @llvm.hexagon.circ.sthhi(i8* %v3, i32 %v8, i32 117441022, i32 2)
+  %v10 = bitcast i8* %v9 to i16*
+  %v11 = load i16, i16* %v10, align 2, !tbaa !0
+  %v12 = add i16 %v11, %v2
+  %v13 = add i32 %v1, 1
+  %v14 = icmp eq i32 %v13, 1000
+  br i1 %v14, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  br label %b3
+
+b3:                                               ; preds = %b2
+  ret i16 %v12
+}
+
+attributes #0 = { nounwind optsize "target-cpu"="hexagonv55" }
+attributes #1 = { argmemonly nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
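
Conceptually (hypothetical C; the real test uses the llvm.hexagon.circ.sthhi
intrinsic, which the modulo index below only approximates), the kernel stores
into a circular buffer and reads back through the advanced pointer in the same
iteration, which is the def/use-in-one-block pattern behind the renaming bug.

  int circ_accumulate(const short *src, short *ring, int ring_len) {
    int sum = 0, idx = 0;
    for (int i = 1; i < 1000; i++) {
      ring[idx] = (short)(src[i + 10] + 4);  /* store into the circular buffer */
      idx = (idx + 1) % ring_len;            /* circular post-increment        */
      sum += ring[idx];                      /* read back through the advanced
                                                pointer in the same iteration  */
    }
    return sum;
  }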

Added: llvm/trunk/test/CodeGen/Hexagon/swp-node-order.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-node-order.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-node-order.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-node-order.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,55 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; Test that we include all the nodes in the final node ordering
+; computation. This test creates two sets of nodes that are processed
+; by computeNodeOrder().
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0) #0 {
+b0:
+  %v0 = add nsw i32 undef, 4
+  %v1 = ashr i32 %a0, 1
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v2 = phi i64 [ %v5, %b1 ], [ 0, %b0 ]
+  %v3 = phi i64 [ %v9, %b1 ], [ undef, %b0 ]
+  %v4 = phi i32 [ %v10, %b1 ], [ 0, %b0 ]
+  %v5 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v2, i64 %v3, i64 undef)
+  %v6 = tail call i64 @llvm.hexagon.A2.combinew(i32 0, i32 0)
+  %v7 = tail call i64 @llvm.hexagon.S2.shuffeh(i64 %v6, i64 undef)
+  %v8 = trunc i64 %v7 to i32
+  %v9 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v8, i32 undef)
+  %v10 = add nsw i32 %v4, 1
+  %v11 = icmp eq i32 %v10, %v1
+  br i1 %v11, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  %v12 = trunc i64 %v5 to i32
+  %v13 = inttoptr i32 %v0 to i32*
+  store i32 %v12, i32* %v13, align 4, !tbaa !0
+  call void @llvm.trap()
+  unreachable
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.A2.combinew(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M2.vdmacs.s0(i64, i64, i64) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.shuffeh(i64, i64) #1
+
+; Function Attrs: noreturn nounwind
+declare void @llvm.trap() #2
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { noreturn nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
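
A loose C analogue of the loop above (illustrative; the real test uses Hexagon
vector-accumulate intrinsics): the body carries two recurrence chains, and the
node-ordering phase must visit both connected sets rather than stopping after
the first one it finds.

  long long two_chains(const int *x, int n) {
    long long acc = 0, pack = 0;
    for (int i = 0; i < (n >> 1); i++) {
      acc += (long long)x[i] * x[i];          /* first recurrence chain  */
      pack = (pack << 16) | (x[i] & 0xffff);  /* second recurrence chain */
    }
    return acc + pack;
  }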

Added: llvm/trunk/test/CodeGen/Hexagon/swp-order-carried.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-order-carried.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-order-carried.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-order-carried.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,86 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Test that we order instructions within a packet correctly. In this case,
+; a definition of a value was added after its use within the packet, which
+; caused an assert.
+
+define void @f0(i32 %a0) {
+b0:
+  %v0 = ashr i32 %a0, 1
+  br i1 undef, label %b3, label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v1 = phi i32 [ %v23, %b1 ], [ undef, %b0 ]
+  %v2 = phi i64 [ %v14, %b1 ], [ 0, %b0 ]
+  %v3 = phi i64 [ %v11, %b1 ], [ 0, %b0 ]
+  %v4 = phi i32 [ %v25, %b1 ], [ 0, %b0 ]
+  %v5 = phi i32 [ %v6, %b1 ], [ undef, %b0 ]
+  %v6 = phi i32 [ %v20, %b1 ], [ undef, %b0 ]
+  %v7 = phi i32 [ %v24, %b1 ], [ undef, %b0 ]
+  %v8 = tail call i32 @llvm.hexagon.A2.combine.lh(i32 %v6, i32 %v5)
+  %v9 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v8, i32 undef)
+  %v10 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v1, i32 undef)
+  %v11 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v3, i64 %v9, i64 undef)
+  %v12 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v5, i32 %v5)
+  %v13 = tail call i64 @llvm.hexagon.S2.valignib(i64 %v10, i64 undef, i32 2)
+  %v14 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v2, i64 %v12, i64 %v13)
+  %v15 = inttoptr i32 %v7 to i16*
+  %v16 = load i16, i16* %v15, align 2
+  %v17 = sext i16 %v16 to i32
+  %v18 = add nsw i32 %v7, -8
+  %v19 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 undef, i64 %v12, i64 0)
+  %v20 = tail call i32 @llvm.hexagon.A2.combine.ll(i32 %v17, i32 %v1)
+  %v21 = inttoptr i32 %v18 to i16*
+  %v22 = load i16, i16* %v21, align 2
+  %v23 = sext i16 %v22 to i32
+  %v24 = add nsw i32 %v7, -16
+  %v25 = add nsw i32 %v4, 1
+  %v26 = icmp eq i32 %v25, %v0
+  br i1 %v26, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  %v27 = phi i64 [ %v19, %b1 ]
+  %v28 = phi i64 [ %v14, %b1 ]
+  %v29 = phi i64 [ %v11, %b1 ]
+  %v30 = trunc i64 %v27 to i32
+  %v31 = trunc i64 %v28 to i32
+  %v32 = lshr i64 %v29, 32
+  %v33 = trunc i64 %v32 to i32
+  br label %b3
+
+b3:                                               ; preds = %b2, %b0
+  %v34 = phi i32 [ %v30, %b2 ], [ undef, %b0 ]
+  %v35 = phi i32 [ %v31, %b2 ], [ undef, %b0 ]
+  %v36 = phi i32 [ %v33, %b2 ], [ undef, %b0 ]
+  %v37 = bitcast i8* undef to i32*
+  store i32 %v35, i32* %v37, align 4
+  %v38 = getelementptr inbounds i8, i8* null, i32 8
+  %v39 = bitcast i8* %v38 to i32*
+  store i32 %v34, i32* %v39, align 4
+  %v40 = bitcast i8* undef to i32*
+  store i32 %v36, i32* %v40, align 4
+  call void @llvm.trap()
+  unreachable
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.combine.ll(i32, i32) #0
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.A2.combinew(i32, i32) #0
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M2.vdmacs.s0(i64, i64, i64) #0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.combine.lh(i32, i32) #0
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.valignib(i64, i64, i32) #0
+
+; Function Attrs: noreturn nounwind
+declare void @llvm.trap() #1
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { noreturn nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-order-deps1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-order-deps1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-order-deps1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-order-deps1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,48 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; Check that the dependences are ordered correctly, and that the list can be
+; updated when the instruction to insert has a def and use conflict.
+
+; Function Attrs: nounwind
+define fastcc void @f0() #0 {
+b0:
+  br i1 undef, label %b7, label %b1
+
+b1:                                               ; preds = %b0
+  br i1 undef, label %b2, label %b4
+
+b2:                                               ; preds = %b1
+  %v0 = load i16, i16* undef, align 2
+  br label %b5
+
+b3:                                               ; preds = %b5
+  br label %b4
+
+b4:                                               ; preds = %b3, %b1
+  %v1 = phi i16 [ %v11, %b3 ], [ 0, %b1 ]
+  br i1 false, label %b7, label %b6
+
+b5:                                               ; preds = %b5, %b2
+  %v2 = phi i16 [ %v3, %b5 ], [ undef, %b2 ]
+  %v3 = phi i16 [ 0, %b5 ], [ %v0, %b2 ]
+  %v4 = phi i16 [ %v2, %b5 ], [ undef, %b2 ]
+  %v5 = phi i16 [ %v11, %b5 ], [ 0, %b2 ]
+  %v6 = phi i32 [ %v12, %b5 ], [ undef, %b2 ]
+  %v7 = or i16 0, %v5
+  %v8 = lshr i16 %v4, 8
+  %v9 = or i16 %v8, %v7
+  %v10 = or i16 0, %v9
+  %v11 = or i16 0, %v10
+  %v12 = add nsw i32 %v6, -32
+  %v13 = icmp sgt i32 %v12, 31
+  br i1 %v13, label %b5, label %b3
+
+b6:                                               ; preds = %b4
+  br label %b7
+
+b7:                                               ; preds = %b6, %b4, %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-order-deps3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-order-deps3.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-order-deps3.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-order-deps3.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,30 @@
+; RUN: llc -march=hexagon -O2 < %s
+; REQUIRES: asserts
+
+; Function Attrs: noinline nounwind ssp
+define fastcc void @f0() #0 {
+b0:
+  %v0 = add i32 0, 39
+  %v1 = and i32 %v0, -8
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b1, %b0
+  %v2 = phi i32 [ %v10, %b1 ], [ undef, %b0 ]
+  %v3 = phi i8* [ %v7, %b1 ], [ undef, %b0 ]
+  %v4 = ptrtoint i8* %v3 to i32
+  %v5 = add i32 %v4, %v1
+  %v6 = bitcast i8* %v3 to i32*
+  store i32 %v5, i32* %v6, align 4
+  %v7 = getelementptr inbounds i8, i8* %v3, i32 %v1
+  %v8 = getelementptr inbounds i8, i8* %v3, i32 0
+  %v9 = bitcast i8* %v8 to i32*
+  store i32 1111638594, i32* %v9, align 4
+  %v10 = add nsw i32 %v2, -1
+  %v11 = icmp sgt i32 %v10, 0
+  br i1 %v11, label %b1, label %b2
+
+b2:                                               ; preds = %b1, %b0
+  ret void
+}
+
+attributes #0 = { noinline nounwind ssp "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-order-deps4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-order-deps4.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-order-deps4.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-order-deps4.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,68 @@
+; RUN: llc -O2 -march=hexagon < %s
+; REQUIRES: asserts
+
+; Test that the final instruction ordering code does not go into infinite
+; recursion (which showed up as a segmentation fault). The problem was that
+; the ordering heuristics did not properly take into account the stage in
+; which an instruction is scheduled.
+
+%0 = type { %1, %4, %9, %28 }
+%1 = type { i8, [32 x %2] }
+%2 = type { i8, %3, i8, i8, i16, i8, [20 x i16], [20 x i16] }
+%3 = type { i16, i8 }
+%4 = type { i8, [64 x %5], [64 x %5*] }
+%5 = type { i8, i8, i8*, %6 }
+%6 = type { %7 }
+%7 = type { i8*, %3, i8, i8, i8, i8, i16, i8, i8, i8, i16, i32, i8, [3 x i8], [3 x i16], i16, i8, i16, i8, %8, i16, i8, i16 }
+%8 = type { i8, i8 }
+%9 = type { i8, i8, %10*, i8, [8 x %7*], i8, i8, i8, i8, i8, %7*, i8, %7*, i8, i8, i8, i8, i8, i8, i8, i8, i32, i8, i32, i32, i32, i32, i32, i32, i32, i8, i8, i16, i8, void (i8)*, i8, i8, i8, i8, i8, i8 }
+%10 = type { i8, i8, i8, i8, i8, %11, %12, %13, %14 }
+%11 = type { i8, i16, i16 }
+%12 = type { i8, i16, i8* }
+%13 = type { i8, i16 }
+%14 = type { %15, %20, %25 }
+%15 = type { i8, i8, %16, i8, [18 x %17] }
+%16 = type { i8, i16, i16 }
+%17 = type { i8, i8, [10 x %3], [10 x i16], [10 x i16], [10 x i8], %18* }
+%18 = type { %19, i16, i16, %19, i16 }
+%19 = type { i16, i16, i16, i8 }
+%20 = type { i8, i8, %21 }
+%21 = type { i8*, %22, %23 }
+%22 = type { %3, i8, i8, i16, i16, i16, i8, i16 }
+%23 = type { [2 x %24], [4 x i8] }
+%24 = type { i8, %3, i16, i16, i16, i16, %18* }
+%25 = type { i8, i8, [8 x %26] }
+%26 = type { i8*, %27, %24 }
+%27 = type { %3, i8, i16, i16, i16 }
+%28 = type { [2 x %29], [2 x i16], i8, i8*, i16, i8, i8, %31*, %32*, %33*, %33*, [3 x %34*], i8, [2 x i8], i8, i8, [2 x i8], [2 x i8], [3 x i8] }
+%29 = type <{ %30, i8, [1000 x i8] }>
+%30 = type { i16, i16, [2 x i32] }
+%31 = type <{ i8, i8, i16, i8 }>
+%32 = type <{ i16, i16, i8, i16 }>
+%33 = type <{ i8, i8, i16, i16, i16, i8, i16, i16 }>
+%34 = type <{ i8, i8, i16, i16, i8, i16, i8, i8, i32, i16, i16, i16 }>
+
+ at g0 = external global [2 x %0], align 8
+
+; Function Attrs: nounwind ssp
+define void @f0() #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i32 [ 0, %b0 ], [ %v5, %b1 ]
+  %v1 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 undef, i32 1, i32 1, i32 %v0
+  %v2 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 undef, i32 1, i32 2, i32 %v0
+  store %5* %v1, %5** %v2, align 4
+  %v3 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 undef, i32 1, i32 1, i32 %v0, i32 3
+  %v4 = bitcast %6* %v3 to %5**
+  store %5* %v1, %5** %v4, align 4
+  %v5 = add nuw nsw i32 %v0, 1
+  %v6 = icmp eq i32 %v5, 64
+  br i1 %v6, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  ret void
+}
+
+attributes #0 = { nounwind ssp "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-order-deps6.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-order-deps6.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-order-deps6.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-order-deps6.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,29 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+%s.0 = type { i64 }
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.A2.combinew(i32, i32) #0
+
+define void @f0(%s.0* noalias nocapture %a0, i32 %a1) local_unnamed_addr {
+b0:
+  %v0 = call i64 @llvm.hexagon.A2.combinew(i32 %a1, i32 %a1)
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v1 = phi i32 [ 0, %b0 ], [ %v6, %b1 ]
+  %v2 = mul nuw nsw i32 %v1, 13
+  %v3 = getelementptr inbounds %s.0, %s.0* %a0, i32 %v2, i32 0
+  %v4 = load i64, i64* %v3, align 8
+  %v5 = add nsw i64 %v4, %v0
+  store i64 %v5, i64* %v3, align 8
+  %v6 = add nuw nsw i32 %v1, 1
+  %v7 = icmp eq i32 %v6, 12
+  br i1 %v7, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  ret void
+}
+
+attributes #0 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-order-prec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-order-prec.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-order-prec.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-order-prec.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,29 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Test that the pipeliner doesn't assert in orderDependence because
+; the check for OrderAfterDef precedence was in the wrong spot.
+
+%s.0 = type <{ i8, [20 x %s.1] }>
+%s.1 = type { i16, i16 }
+
+; Function Attrs: nounwind optsize ssp
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i32 [ %v3, %b1 ], [ 0, %b0 ]
+  %v1 = getelementptr inbounds %s.0, %s.0* undef, i32 0, i32 1, i32 %v0, i32 0
+  store i16 0, i16* %v1, align 1
+  %v2 = getelementptr inbounds %s.0, %s.0* undef, i32 0, i32 1, i32 %v0, i32 1
+  store i16 -1, i16* %v2, align 1
+  %v3 = add nsw i32 %v0, 1
+  %v4 = icmp eq i32 %v3, 20
+  br i1 %v4, label %b2, label %b1
+
+b2:                                               ; preds = %b1, %b0
+  ret void
+}
+
+attributes #0 = { nounwind optsize ssp "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-order.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-order.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-order.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-order.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,67 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that when we order instructions in a packet we check for
+; order dependences so that the source of an order dependence
+; appears before the destination.
+
+; CHECK: loop0(.LBB0_[[LOOP:.]],
+; CHECK: .LBB0_[[LOOP]]:
+; CHECK: = memw
+; CHECK: = memw
+; CHECK: memw({{.*}}) =
+; CHECK: = memw
+; CHECK: = memw
+; CHECK: endloop0
+
+ at g0 = external hidden unnamed_addr constant [19 x i8], align 1
+
+; Function Attrs: nounwind optsize
+declare i32 @f0(i8* nocapture readonly, ...) #0
+
+; Function Attrs: nounwind optsize
+declare void @f1(i32*, i32*, i32* nocapture readnone) #0
+
+; Function Attrs: argmemonly nounwind
+declare i8* @llvm.hexagon.circ.stw(i8*, i32, i32, i32) #1
+
+; Function Attrs: nounwind optsize
+define void @f2(i32* %a0, i32* %a1, i32* %a2) #0 {
+b0:
+  %v0 = alloca i32, align 4
+  call void @f1(i32* %a2, i32* %a0, i32* %v0) #2
+  %v1 = bitcast i32* %a1 to i8*
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v2 = phi i32 [ 0, %b0 ], [ %v13, %b1 ]
+  %v3 = phi i32* [ %a2, %b0 ], [ %v16, %b1 ]
+  %v4 = phi i32 [ 0, %b0 ], [ %v14, %b1 ]
+  %v5 = load i32, i32* %a1, align 4, !tbaa !0
+  %v6 = add nsw i32 %v2, %v5
+  %v7 = load i32, i32* %v3, align 4, !tbaa !0
+  %v8 = tail call i8* @llvm.hexagon.circ.stw(i8* %v1, i32 %v7, i32 150995968, i32 4) #3
+  %v9 = bitcast i8* %v8 to i32*
+  %v10 = load i32, i32* %v3, align 4, !tbaa !0
+  %v11 = add nsw i32 %v6, %v10
+  %v12 = load i32, i32* %v9, align 4, !tbaa !0
+  %v13 = add nsw i32 %v11, %v12
+  %v14 = add nsw i32 %v4, 1
+  %v15 = icmp eq i32 %v14, 2
+  %v16 = getelementptr i32, i32* %v3, i32 1
+  br i1 %v15, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  %v17 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([19 x i8], [19 x i8]* @g0, i32 0, i32 0), i32 %v13) #4
+  ret void
+}
+
+attributes #0 = { nounwind optsize "target-cpu"="hexagonv55" }
+attributes #1 = { argmemonly nounwind }
+attributes #2 = { optsize }
+attributes #3 = { nounwind }
+attributes #4 = { nounwind optsize }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-order1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-order1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-order1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-order1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,35 @@
+; RUN: llc -O2 -march=hexagon < %s
+; REQUIRES: asserts
+
+%0 = type { [2 x [8 x [16 x i8]]], [4 x [16 x %1*]] }
+%1 = type { i32, i32, i8, i8, %2, %6* }
+%2 = type { i32, i32, %3*, i8, i16, i16, i8 }
+%3 = type { i16, i16, %4, i16, i8, i16, %5, i32 }
+%4 = type { i32 }
+%5 = type { i16, i16 }
+%6 = type { %7* }
+%7 = type { [16 x i16], [16 x i16] }
+
+; Function Attrs: norecurse nounwind
+define void @f0(%0* nocapture %a0) #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i32 [ 0, %b0 ], [ %v6, %b1 ]
+  %v1 = getelementptr inbounds %0, %0* %a0, i32 0, i32 1, i32 3, i32 %v0
+  %v2 = bitcast %1** %v1 to i32*
+  %v3 = load i32, i32* %v2, align 4
+  store i32 %v3, i32* undef, align 4
+  %v4 = getelementptr inbounds %0, %0* %a0, i32 0, i32 1, i32 0, i32 %v0
+  %v5 = bitcast %1** %v4 to i32*
+  store i32 %v3, i32* %v5, align 4
+  %v6 = add nuw nsw i32 %v0, 1
+  %v7 = icmp eq i32 %v6, 16
+  br i1 %v7, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  ret void
+}
+
+attributes #0 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-phi-chains.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-phi-chains.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-phi-chains.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-phi-chains.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,134 @@
+; RUN: llc -march=hexagon -debug-only=pipeliner < %s -o - 2>&1 | FileCheck %s
+
+; Test that there is a chain edge between two dependent Phis.
+; The pipeliner tries to remove chains between unrelated Phis, but
+; was too aggressive in some cases. When this happens, the two Phis may get
+; scheduled too far apart. In this case, the second Phi was scheduled in
+; the next stage.
+
+; CHECK: SU([[SU1:[0-9]+]]): %14:intregs = PHI %{{[0-9]+}}:intregs, %bb.0, %{{[0-9]+}}:intregs, %bb.1
+; CHECK: Successors:
+; CHECK: SU({{.*}}): Data Latency=0
+; CHECK: SU([[SU2:[0-9]+]]): Data Latency=0
+; CHECK: SU([[SU2]]):   %{{[0-9]+}}:intregs = PHI %{{[0-9]+}}:intregs, %bb.0, %14:intregs, %bb.1
+; CHECK: Predecessors:
+; CHECK: SU([[SU1]]): Data Latency=0
+
+%s.0 = type { i16, i8, i32, i8*, i8*, i8*, i8*, i8*, i8*, i32*, [2 x i32], i8*, i8*, i8*, %s.1, i8*, [8 x i8], i8 }
+%s.1 = type { i32, i16, i16 }
+%s.2 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+
+ at g0 = global i32 0, align 4
+ at g1 = global i32 0, align 4
+ at g2 = global i32 0, align 4
+ at g3 = global i32 0, align 4
+ at g4 = global i32 0, align 4
+ at g5 = common global i32 0, align 4
+ at g6 = external global %s.0
+ at g7 = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
+
+; Function Attrs: nounwind
+declare i32 @f0(%s.0* nocapture, i8* nocapture readonly, ...) #0
+
+; Function Attrs: nounwind
+define void @f1(%s.2* nocapture %a0, i32* nocapture readonly %a1, i32* nocapture readonly %a2, i16 signext %a3) #0 {
+b0:
+  %v0 = load i32, i32* %a2, align 4
+  %v1 = tail call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v0, i32 2)
+  %v2 = tail call i32 @llvm.hexagon.A2.sath(i32 %v1)
+  store i32 0, i32* @g5, align 4
+  %v3 = load i32, i32* @g0, align 4
+  %v4 = load i32, i32* @g1, align 4
+  %v5 = load i32, i32* @g2, align 4
+  %v6 = load i32, i32* @g3, align 4
+  %v7 = load i32, i32* @g4, align 4
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v8 = phi i32 [ %v7, %b0 ], [ %v52, %b1 ]
+  %v9 = phi i32 [ %v6, %b0 ], [ %v50, %b1 ]
+  %v10 = phi i32 [ %v5, %b0 ], [ %v46, %b1 ]
+  %v11 = phi i32 [ %v4, %b0 ], [ %v44, %b1 ]
+  %v12 = phi i32 [ %v3, %b0 ], [ %v38, %b1 ]
+  %v13 = phi i32 [ 0, %b0 ], [ %v53, %b1 ]
+  %v14 = phi i32* [ %a2, %b0 ], [ %v26, %b1 ]
+  %v15 = phi i32* [ %a1, %b0 ], [ %v19, %b1 ]
+  %v16 = phi i32 [ %v2, %b0 ], [ %v32, %b1 ]
+  %v17 = phi i32 [ 0, %b0 ], [ %v25, %b1 ]
+  %v18 = phi i32 [ 0, %b0 ], [ %v16, %b1 ]
+  %v19 = getelementptr inbounds i32, i32* %v15, i32 1
+  %v20 = load i32, i32* %v15, align 4
+  %v21 = tail call i32 @llvm.hexagon.A2.asrh(i32 %v20)
+  %v22 = shl i32 %v21, 16
+  %v23 = ashr exact i32 %v22, 16
+  %v24 = tail call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v23, i32 2)
+  %v25 = tail call i32 @llvm.hexagon.A2.sath(i32 %v24)
+  %v26 = getelementptr inbounds i32, i32* %v14, i32 1
+  %v27 = load i32, i32* %v14, align 4
+  %v28 = tail call i32 @llvm.hexagon.A2.asrh(i32 %v27)
+  %v29 = shl i32 %v28, 16
+  %v30 = ashr exact i32 %v29, 16
+  %v31 = tail call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v30, i32 2)
+  %v32 = tail call i32 @llvm.hexagon.A2.sath(i32 %v31)
+  %v33 = shl i32 %v17, 16
+  %v34 = ashr exact i32 %v33, 16
+  %v35 = tail call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32 %v12, i32 %v34, i32 %v34)
+  %v36 = shl i32 %v16, 16
+  %v37 = ashr exact i32 %v36, 16
+  %v38 = tail call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32 %v35, i32 %v37, i32 %v37)
+  store i32 %v38, i32* @g0, align 4
+  %v39 = shl i32 %v25, 16
+  %v40 = ashr exact i32 %v39, 16
+  %v41 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v11, i32 %v40, i32 %v34)
+  %v42 = shl i32 %v32, 16
+  %v43 = ashr exact i32 %v42, 16
+  %v44 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v41, i32 %v43, i32 %v37)
+  store i32 %v44, i32* @g1, align 4
+  %v45 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v10, i32 %v43, i32 %v34)
+  %v46 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v45, i32 %v40, i32 %v37)
+  store i32 %v46, i32* @g2, align 4
+  %v47 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v9, i32 %v40, i32 0)
+  %v48 = shl i32 %v18, 16
+  %v49 = ashr exact i32 %v48, 16
+  %v50 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v47, i32 %v43, i32 %v49)
+  store i32 %v50, i32* @g3, align 4
+  %v51 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v8, i32 %v43, i32 0)
+  %v52 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %v51, i32 %v40, i32 %v49)
+  store i32 %v52, i32* @g4, align 4
+  %v53 = add nsw i32 %v13, 1
+  %v54 = icmp slt i32 %v53, 4
+  store i32 %v53, i32* @g5, align 4
+  br i1 %v54, label %b1, label %b2
+
+b2:                                               ; preds = %b1
+  %v55 = tail call i32 (%s.0*, i8*, ...) @f0(%s.0* @g6, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g7, i32 0, i32 0), i32 %v46) #2
+  %v56 = load i32, i32* @g2, align 4
+  %v57 = getelementptr inbounds %s.2, %s.2* %a0, i32 0, i32 5
+  store i32 %v56, i32* %v57, align 4, !tbaa !0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.sath(i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.asrh(i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32, i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32, i32, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
+
+!0 = !{!1, !2, i64 20}
+!1 = !{!"", !2, i64 0, !2, i64 4, !2, i64 8, !2, i64 12, !2, i64 16, !2, i64 20, !2, i64 24, !2, i64 28, !2, i64 32}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
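
For intuition, a hypothetical C recurrence with the same dependent-phi shape
as the loop above (constants and intrinsics omitted): `d2` is fed from `d1`,
so the two resulting phis are chained and must not drift into different
stages of the pipelined schedule.

  int two_delay_taps(const int *x, int n) {
    int d1 = 0, d2 = 0, acc = 0;
    for (int i = 0; i < n; i++) {
      acc += x[i] * d2;  /* consumes the value from two iterations ago */
      d2 = d1;           /* phi that depends on the other phi          */
      d1 = x[i] >> 2;    /* phi rewritten this iteration               */
    }
    return acc;
  }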

Added: llvm/trunk/test/CodeGen/Hexagon/swp-phi-def-use.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-phi-def-use.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-phi-def-use.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-phi-def-use.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,121 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Test that the pipeliner doesn't assert when renaming a phi
+; that looks like: a = PHI b, a
+
+%s.0 = type { i32, i32*, [0 x i32], [0 x i32], [1 x i32] }
+%s.1 = type { %s.2, %s.4, %s.5 }
+%s.2 = type { %s.3 }
+%s.3 = type { i32 }
+%s.4 = type { i32 }
+%s.5 = type { [0 x i32], [0 x i32 (i32*, i32*, i32*, i32*, i32*, i32, i32*)*] }
+
+ at g0 = external global i32, align 4
+ at g1 = external global %s.0, align 4
+ at g2 = external global i32, align 4
+ at g3 = external global i32, align 4
+ at g4 = external global i32*, align 4
+
+define void @f0(%s.1* nocapture readonly %a0) #0 {
+b0:
+  %v0 = alloca [0 x i32], align 4
+  %v1 = load i32, i32* @g0, align 4
+  %v2 = load i32, i32* undef, align 4
+  %v3 = load i32*, i32** getelementptr inbounds (%s.0, %s.0* @g1, i32 0, i32 1), align 4
+  %v4 = load i32, i32* @g2, align 4
+  %v5 = sub i32 0, %v4
+  %v6 = getelementptr inbounds i32, i32* %v3, i32 %v5
+  %v7 = load i32, i32* undef, align 4
+  switch i32 %v7, label %b15 [
+    i32 0, label %b1
+    i32 1, label %b2
+  ]
+
+b1:                                               ; preds = %b0
+  store i32 0, i32* @g3, align 4
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v8 = icmp eq i32 %v1, 0
+  %v9 = icmp sgt i32 %v2, 0
+  %v10 = getelementptr inbounds [0 x i32], [0 x i32]* %v0, i32 0, i32 0
+  %v11 = sdiv i32 %v2, 2
+  %v12 = add i32 %v11, -1
+  %v13 = getelementptr inbounds [0 x i32], [0 x i32]* %v0, i32 0, i32 1
+  %v14 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 2, i32 1, i32 %v1
+  %v15 = sub i32 1, %v4
+  %v16 = getelementptr inbounds i32, i32* %v3, i32 %v15
+  %v17 = sdiv i32 %v2, 4
+  %v18 = icmp slt i32 %v2, -3
+  %v19 = add i32 %v2, -1
+  %v20 = lshr i32 %v19, 2
+  %v21 = mul i32 %v20, 4
+  %v22 = add i32 %v21, 4
+  %v23 = add i32 %v11, -2
+  %v24 = add i32 %v17, 1
+  %v25 = select i1 %v18, i32 1, i32 %v24
+  br label %b4
+
+b3:                                               ; preds = %b14
+  store i32 %v25, i32* @g3, align 4
+  br label %b4
+
+b4:                                               ; preds = %b13, %b3, %b2
+  %v26 = phi i32 [ undef, %b2 ], [ %v42, %b3 ], [ %v42, %b13 ]
+  %v27 = phi i32 [ undef, %b2 ], [ 0, %b3 ], [ 0, %b13 ]
+  %v28 = phi i32 [ undef, %b2 ], [ %v30, %b3 ], [ %v30, %b13 ]
+  %v29 = phi i32 [ undef, %b2 ], [ %v43, %b3 ], [ %v43, %b13 ]
+  %v30 = phi i32 [ undef, %b2 ], [ undef, %b3 ], [ 0, %b13 ]
+  br i1 %v8, label %b6, label %b5
+
+b5:                                               ; preds = %b5, %b4
+  br label %b5
+
+b6:                                               ; preds = %b4
+  br i1 %v9, label %b8, label %b7
+
+b7:                                               ; preds = %b6
+  store i32 0, i32* @g3, align 4
+  br label %b11
+
+b8:                                               ; preds = %b6
+  br i1 undef, label %b9, label %b11
+
+b9:                                               ; preds = %b8
+  %v31 = load i32*, i32** @g4, align 4
+  br label %b10
+
+b10:                                              ; preds = %b10, %b9
+  %v32 = phi i32 [ %v22, %b9 ], [ %v39, %b10 ]
+  %v33 = phi i32 [ %v29, %b9 ], [ %v38, %b10 ]
+  %v34 = add nsw i32 %v32, %v28
+  %v35 = shl i32 %v34, 1
+  %v36 = getelementptr inbounds i32, i32* %v31, i32 %v35
+  %v37 = load i32, i32* %v36, align 4
+  %v38 = select i1 false, i32 0, i32 %v33
+  %v39 = add nsw i32 %v32, 1
+  store i32 %v39, i32* @g3, align 4
+  %v40 = icmp slt i32 %v39, 0
+  br i1 %v40, label %b10, label %b11
+
+b11:                                              ; preds = %b10, %b8, %b7
+  %v41 = phi i32 [ %v29, %b8 ], [ %v29, %b7 ], [ %v38, %b10 ]
+  br i1 false, label %b12, label %b13
+
+b12:                                              ; preds = %b11
+  br label %b13
+
+b13:                                              ; preds = %b12, %b11
+  %v42 = load i32, i32* %v10, align 4
+  %v43 = select i1 false, i32 %v41, i32 1
+  br i1 %v18, label %b4, label %b14
+
+b14:                                              ; preds = %b14, %b13
+  br i1 false, label %b14, label %b3
+
+b15:                                              ; preds = %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
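
To see where a phi of the form `a = PHI b, a` can come from, consider this
hypothetical C fragment (not from the patch): when the condition is false the
variable keeps its old value, so after the inner control flow is simplified
one incoming value of the phi is the phi itself.

  int last_negative(const int *p, int n, int init) {
    int a = init;
    for (int i = 0; i < n; i++) {
      if (p[i] < 0)
        a = p[i];  /* the taken edge defines the new value (b) */
      /* the fall-through edge carries the old `a` around the loop,
         so one incoming value of the phi is the phi itself */
    }
    return a;
  }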

Added: llvm/trunk/test/CodeGen/Hexagon/swp-phi-dep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-phi-dep.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-phi-dep.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-phi-dep.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,66 @@
+; RUN: llc -march=hexagon -enable-pipeliner -pipeliner-max-stages=2 < %s | FileCheck %s
+
+; Check that the pipelined code uses the proper address in the
+; prolog and the kernel. The bug showed up when the address computation
+; computed the same value twice.
+
+; CHECK: = addasl(r{{[0-9]+}},[[REG0:(r[0-9]+)]],#1)
+; CHECK-NOT: = addasl(r{{[0-9]+}},[[REG0]],#1)
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0, i16* nocapture %a1) #0 {
+b0:
+  br i1 undef, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  unreachable
+
+b2:                                               ; preds = %b0
+  br label %b3
+
+b3:                                               ; preds = %b4, %b2
+  br i1 undef, label %b4, label %b5
+
+b4:                                               ; preds = %b3
+  br label %b3
+
+b5:                                               ; preds = %b3
+  br i1 undef, label %b6, label %b7
+
+b6:                                               ; preds = %b5
+  unreachable
+
+b7:                                               ; preds = %b5
+  br i1 undef, label %b8, label %b12
+
+b8:                                               ; preds = %b7
+  br i1 undef, label %b9, label %b11
+
+b9:                                               ; preds = %b9, %b8
+  br i1 undef, label %b9, label %b10
+
+b10:                                              ; preds = %b9
+  br i1 undef, label %b12, label %b11
+
+b11:                                              ; preds = %b11, %b10, %b8
+  %v0 = phi i32 [ %v6, %b11 ], [ undef, %b8 ], [ undef, %b10 ]
+  %v1 = phi i32 [ %v0, %b11 ], [ %a0, %b8 ], [ undef, %b10 ]
+  %v2 = add nsw i32 %v1, -2
+  %v3 = getelementptr inbounds i16, i16* %a1, i32 %v2
+  %v4 = load i16, i16* %v3, align 2, !tbaa !0
+  %v5 = getelementptr inbounds i16, i16* %a1, i32 %v0
+  store i16 %v4, i16* %v5, align 2, !tbaa !0
+  %v6 = add nsw i32 %v0, -1
+  %v7 = icmp sgt i32 %v6, 0
+  br i1 %v7, label %b11, label %b12
+
+b12:                                              ; preds = %b11, %b10, %b7
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-phi-dep1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-phi-dep1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-phi-dep1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-phi-dep1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,123 @@
+; RUN: llc -disable-lsr -march=hexagon -enable-aa-sched-mi -O2 < %s
+; REQUIRES: asserts
+
+; Test when there is a Phi operand that is defined by another Phi, but
+; the two Phis are scheduled in different iterations.
+
+; Function Attrs: nounwind
+define void @f0(i8* noalias nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i8* noalias nocapture %a4, i32 %a5) #0 {
+b0:
+  %v0 = add i32 %a2, -1
+  %v1 = icmp ugt i32 %v0, 1
+  br i1 %v1, label %b1, label %b6
+
+b1:                                               ; preds = %b0
+  %v2 = add i32 %a1, -1
+  %v3 = mul i32 %a3, 2
+  %v4 = add i32 %v3, 1
+  %v5 = add i32 %a3, 1
+  %v6 = add i32 %a1, -2
+  %v7 = getelementptr i8, i8* %a0, i32 2
+  %v8 = add i32 %a5, 1
+  %v9 = getelementptr i8, i8* %a4, i32 %v8
+  br label %b2
+
+b2:                                               ; preds = %b5, %b1
+  %v10 = phi i8* [ %v85, %b5 ], [ %v9, %b1 ]
+  %v11 = phi i8* [ %v84, %b5 ], [ %v7, %b1 ]
+  %v12 = phi i32 [ 0, %b1 ], [ %v83, %b5 ]
+  %v13 = phi i32 [ 1, %b1 ], [ %v82, %b5 ]
+  %v14 = icmp ugt i32 %v2, 1
+  %v15 = mul i32 %v12, %a3
+  br i1 %v14, label %b3, label %b5
+
+b3:                                               ; preds = %b2
+  %v16 = add i32 %v12, 2
+  %v17 = add i32 %v15, 1
+  %v18 = mul i32 %v16, %a3
+  %v19 = add i32 %v4, %v15
+  %v20 = add i32 %v15, %a3
+  %v21 = add i32 %v5, %v15
+  %v22 = getelementptr i8, i8* %a0, i32 %v15
+  %v23 = getelementptr i8, i8* %a0, i32 %v17
+  %v24 = getelementptr i8, i8* %a0, i32 %v18
+  %v25 = getelementptr i8, i8* %a0, i32 %v19
+  %v26 = getelementptr i8, i8* %a0, i32 %v20
+  %v27 = getelementptr i8, i8* %a0, i32 %v21
+  %v28 = load i8, i8* %v23, align 1
+  %v29 = load i8, i8* %v22, align 1
+  %v30 = load i8, i8* %v25, align 1
+  %v31 = load i8, i8* %v24, align 1
+  %v32 = load i8, i8* %v27, align 1
+  %v33 = load i8, i8* %v26, align 1
+  br label %b4
+
+b4:                                               ; preds = %b4, %b3
+  %v34 = phi i8* [ %v80, %b4 ], [ %v10, %b3 ]
+  %v35 = phi i8* [ %v79, %b4 ], [ %v11, %b3 ]
+  %v36 = phi i32 [ %v78, %b4 ], [ %v6, %b3 ]
+  %v37 = phi i8 [ %v28, %b3 ], [ %v43, %b4 ]
+  %v38 = phi i8 [ %v29, %b3 ], [ %v37, %b4 ]
+  %v39 = phi i8 [ %v30, %b3 ], [ %v47, %b4 ]
+  %v40 = phi i8 [ %v31, %b3 ], [ %v39, %b4 ]
+  %v41 = phi i8 [ %v32, %b3 ], [ %v45, %b4 ]
+  %v42 = phi i8 [ %v33, %b3 ], [ %v41, %b4 ]
+  %v43 = load i8, i8* %v35, align 1, !tbaa !0
+  %v44 = getelementptr i8, i8* %v35, i32 %a3
+  %v45 = load i8, i8* %v44, align 1, !tbaa !0
+  %v46 = getelementptr i8, i8* %v35, i32 %v3
+  %v47 = load i8, i8* %v46, align 1, !tbaa !0
+  %v48 = zext i8 %v38 to i32
+  %v49 = zext i8 %v37 to i32
+  %v50 = zext i8 %v43 to i32
+  %v51 = zext i8 %v40 to i32
+  %v52 = zext i8 %v39 to i32
+  %v53 = zext i8 %v47 to i32
+  %v54 = sub i32 %v49, %v52
+  %v55 = mul i32 %v54, 2
+  %v56 = add i32 %v50, %v48
+  %v57 = sub i32 %v56, %v51
+  %v58 = sub i32 %v57, %v53
+  %v59 = add i32 %v58, %v55
+  %v60 = zext i8 %v42 to i32
+  %v61 = zext i8 %v45 to i32
+  %v62 = sub i32 %v60, %v61
+  %v63 = mul i32 %v62, 2
+  %v64 = sub i32 %v48, %v50
+  %v65 = add i32 %v64, %v51
+  %v66 = add i32 %v65, %v63
+  %v67 = sub i32 %v66, %v53
+  %v68 = icmp sgt i32 %v59, -1
+  %v69 = sub i32 0, %v59
+  %v70 = select i1 %v68, i32 %v59, i32 %v69
+  %v71 = icmp sgt i32 %v67, -1
+  %v72 = sub i32 0, %v67
+  %v73 = select i1 %v71, i32 %v67, i32 %v72
+  %v74 = add nsw i32 %v70, %v73
+  %v75 = icmp ugt i32 %v74, 255
+  %v76 = trunc i32 %v74 to i8
+  %v77 = select i1 %v75, i8 -1, i8 %v76
+  store i8 %v77, i8* %v34, align 1, !tbaa !0
+  %v78 = add i32 %v36, -1
+  %v79 = getelementptr i8, i8* %v35, i32 1
+  %v80 = getelementptr i8, i8* %v34, i32 1
+  %v81 = icmp eq i32 %v78, 0
+  br i1 %v81, label %b5, label %b4
+
+b5:                                               ; preds = %b4, %b2
+  %v82 = add i32 %v13, 1
+  %v83 = add i32 %v12, 1
+  %v84 = getelementptr i8, i8* %v11, i32 %a3
+  %v85 = getelementptr i8, i8* %v10, i32 %a5
+  %v86 = icmp eq i32 %v82, %v0
+  br i1 %v86, label %b6, label %b2
+
+b6:                                               ; preds = %b5, %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-phi-order.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-phi-order.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-phi-order.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-phi-order.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,26 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+%s.0 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, [49 x i8], [49 x i8], [25 x i8], [6 x i8], [29 x i8], i8, [6 x i8], [6 x i8] }
+
+define void @f0(%s.0* nocapture %a0) {
+b0:
+  br i1 undef, label %b2, label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i32 [ %v6, %b1 ], [ undef, %b0 ]
+  %v1 = phi i32 [ %v8, %b1 ], [ 1, %b0 ]
+  %v2 = and i32 %v0, 255
+  %v3 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 9, i32 %v1
+  %v4 = load i8, i8* %v3, align 1
+  %v5 = zext i8 %v4 to i32
+  %v6 = add nsw i32 %v5, %v2
+  %v7 = trunc i32 %v6 to i8
+  store i8 %v7, i8* %v3, align 1
+  %v8 = add nsw i32 %v1, 1
+  %v9 = icmp sgt i32 %v8, undef
+  br i1 %v9, label %b2, label %b1
+
+b2:                                               ; preds = %b2, %b1, %b0
+  br label %b2
+}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-phi-ref1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-phi-ref1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-phi-ref1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-phi-ref1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,72 @@
+; RUN: llc  -march=hexagon -mno-pairing -mno-compound < %s
+; REQUIRES: asserts
+
+; Test that the SWP doesn't assert when generating new phis. In this example, a
+; phi references another phi, and the two phis as well as the phi's loop value
+; are all defined in different stages.
+;  v3 =             stage 2
+;  v2 = phi(vb, v3) stage 1
+;  v1 = phi(va, v2) stage 0
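+; (In the sketch above, va and vb are assumed to denote values live into the
+; loop; the point is that v3, v2, and v1 belong to three different stages.)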
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  br i1 undef, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  br i1 undef, label %b3, label %b4
+
+b4:                                               ; preds = %b4, %b3
+  %v0 = phi i32 [ %v17, %b4 ], [ undef, %b3 ]
+  %v1 = phi i64 [ %v13, %b4 ], [ undef, %b3 ]
+  %v2 = phi i32 [ %v19, %b4 ], [ undef, %b3 ]
+  %v3 = phi i32 [ %v4, %b4 ], [ undef, %b3 ]
+  %v4 = phi i32 [ %v14, %b4 ], [ undef, %b3 ]
+  %v5 = phi i32 [ %v18, %b4 ], [ undef, %b3 ]
+  %v6 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v3, i32 %v3)
+  %v7 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v0, i32 undef)
+  %v8 = tail call i64 @llvm.hexagon.S2.valignib(i64 %v7, i64 undef, i32 2)
+  %v9 = inttoptr i32 %v5 to i16*
+  %v10 = load i16, i16* %v9, align 2, !tbaa !0
+  %v11 = sext i16 %v10 to i32
+  %v12 = add nsw i32 %v5, -8
+  %v13 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v1, i64 %v6, i64 %v8)
+  %v14 = tail call i32 @llvm.hexagon.A2.combine.ll(i32 %v11, i32 %v0)
+  %v15 = inttoptr i32 %v12 to i16*
+  %v16 = load i16, i16* %v15, align 2, !tbaa !0
+  %v17 = sext i16 %v16 to i32
+  %v18 = add nsw i32 %v5, -16
+  %v19 = add nsw i32 %v2, 1
+  %v20 = icmp eq i32 %v19, 0
+  br i1 %v20, label %b5, label %b4
+
+b5:                                               ; preds = %b4
+  %v21 = phi i64 [ %v13, %b4 ]
+  unreachable
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.combine.ll(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.A2.combinew(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M2.vdmacs.s0(i64, i64, i64) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.valignib(i64, i64, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-phi.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-phi.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-phi.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,157 @@
+; RUN: llc -march=hexagon -enable-unsafe-fp-math -enable-pipeliner \
+; RUN:     -pipeliner-prune-deps=false -stats -o /dev/null < %s
+; REQUIRES: asserts
+
+; Test that checks we don't crash when software pipelining a loop with lots of
+; Phis, where the Phi operands refer to other Phis from the same loop.
+
+; Function Attrs: nounwind
+define void @f0(float* nocapture %a0, float* nocapture %a1) #0 {
+b0:
+  %v0 = alloca [400 x float], align 4
+  %v1 = getelementptr inbounds float, float* %a1, i32 1
+  %v2 = getelementptr inbounds float, float* %a1, i32 2
+  %v3 = getelementptr inbounds float, float* %a1, i32 3
+  %v4 = getelementptr inbounds float, float* %a1, i32 4
+  %v5 = getelementptr inbounds float, float* %a1, i32 5
+  %v6 = getelementptr inbounds float, float* %a1, i32 6
+  %v7 = getelementptr inbounds float, float* %a1, i32 7
+  %v8 = getelementptr inbounds float, float* %a1, i32 8
+  %v9 = getelementptr inbounds float, float* %a1, i32 9
+  %v10 = getelementptr inbounds float, float* %a1, i32 10
+  %v11 = getelementptr inbounds float, float* %a1, i32 11
+  %v12 = getelementptr inbounds float, float* %a1, i32 12
+  %v13 = getelementptr inbounds float, float* %a1, i32 13
+  %v14 = getelementptr inbounds float, float* %a1, i32 14
+  %v15 = getelementptr inbounds float, float* %a1, i32 15
+  %v16 = getelementptr inbounds float, float* %a1, i32 16
+  %v17 = load float, float* %a1, align 4
+  %v18 = load float, float* %v1, align 4
+  %v19 = load float, float* %v2, align 4
+  %v20 = load float, float* %v3, align 4
+  %v21 = load float, float* %v4, align 4
+  %v22 = load float, float* %v5, align 4
+  %v23 = load float, float* %v6, align 4
+  %v24 = load float, float* %v7, align 4
+  %v25 = load float, float* %v8, align 4
+  %v26 = load float, float* %v9, align 4
+  %v27 = load float, float* %v10, align 4
+  %v28 = load float, float* %v11, align 4
+  %v29 = load float, float* %v12, align 4
+  %v30 = load float, float* %v13, align 4
+  %v31 = load float, float* %v14, align 4
+  %v32 = load float, float* %v15, align 4
+  %v33 = load float, float* %v16, align 4
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v34 = phi float [ undef, %b0 ], [ %v103, %b1 ]
+  %v35 = phi float [ undef, %b0 ], [ %v34, %b1 ]
+  %v36 = phi float [ undef, %b0 ], [ %v35, %b1 ]
+  %v37 = phi float [ undef, %b0 ], [ %v36, %b1 ]
+  %v38 = phi float [ undef, %b0 ], [ %v37, %b1 ]
+  %v39 = phi float [ undef, %b0 ], [ %v38, %b1 ]
+  %v40 = phi float [ undef, %b0 ], [ %v39, %b1 ]
+  %v41 = phi float [ undef, %b0 ], [ %v40, %b1 ]
+  %v42 = phi float [ undef, %b0 ], [ %v41, %b1 ]
+  %v43 = phi float [ undef, %b0 ], [ %v42, %b1 ]
+  %v44 = phi float [ undef, %b0 ], [ %v43, %b1 ]
+  %v45 = phi float [ undef, %b0 ], [ %v44, %b1 ]
+  %v46 = phi float [ undef, %b0 ], [ %v45, %b1 ]
+  %v47 = phi float [ undef, %b0 ], [ %v46, %b1 ]
+  %v48 = phi float [ undef, %b0 ], [ %v47, %b1 ]
+  %v49 = phi float [ undef, %b0 ], [ %v48, %b1 ]
+  %v50 = phi float [ %v33, %b0 ], [ %v105, %b1 ]
+  %v51 = phi float [ %v32, %b0 ], [ %v100, %b1 ]
+  %v52 = phi float [ %v31, %b0 ], [ %v98, %b1 ]
+  %v53 = phi float [ %v30, %b0 ], [ %v96, %b1 ]
+  %v54 = phi float [ %v29, %b0 ], [ %v94, %b1 ]
+  %v55 = phi float [ %v28, %b0 ], [ %v92, %b1 ]
+  %v56 = phi float [ %v27, %b0 ], [ %v90, %b1 ]
+  %v57 = phi float [ %v26, %b0 ], [ %v88, %b1 ]
+  %v58 = phi float [ %v25, %b0 ], [ %v86, %b1 ]
+  %v59 = phi float [ %v24, %b0 ], [ %v84, %b1 ]
+  %v60 = phi float [ %v23, %b0 ], [ %v82, %b1 ]
+  %v61 = phi float [ %v22, %b0 ], [ %v80, %b1 ]
+  %v62 = phi float [ %v21, %b0 ], [ %v78, %b1 ]
+  %v63 = phi float [ %v20, %b0 ], [ %v76, %b1 ]
+  %v64 = phi float [ %v19, %b0 ], [ %v74, %b1 ]
+  %v65 = phi float [ %v18, %b0 ], [ %v72, %b1 ]
+  %v66 = phi float [ %v17, %b0 ], [ %v69, %b1 ]
+  %v67 = phi i32 [ 0, %b0 ], [ %v70, %b1 ]
+  %v68 = fmul float %v49, %v49
+  %v69 = fadd float %v66, %v68
+  %v70 = add nsw i32 %v67, 1
+  %v71 = fmul float %v49, %v48
+  %v72 = fadd float %v65, %v71
+  %v73 = fmul float %v49, %v47
+  %v74 = fadd float %v64, %v73
+  %v75 = fmul float %v49, %v46
+  %v76 = fadd float %v63, %v75
+  %v77 = fmul float %v49, %v45
+  %v78 = fadd float %v62, %v77
+  %v79 = fmul float %v49, %v44
+  %v80 = fadd float %v61, %v79
+  %v81 = fmul float %v49, %v43
+  %v82 = fadd float %v60, %v81
+  %v83 = fmul float %v49, %v42
+  %v84 = fadd float %v59, %v83
+  %v85 = fmul float %v49, %v41
+  %v86 = fadd float %v58, %v85
+  %v87 = fmul float %v49, %v40
+  %v88 = fadd float %v57, %v87
+  %v89 = fmul float %v49, %v39
+  %v90 = fadd float %v56, %v89
+  %v91 = fmul float %v49, %v38
+  %v92 = fadd float %v55, %v91
+  %v93 = fmul float %v49, %v37
+  %v94 = fadd float %v54, %v93
+  %v95 = fmul float %v49, %v36
+  %v96 = fadd float %v53, %v95
+  %v97 = fmul float %v49, %v35
+  %v98 = fadd float %v52, %v97
+  %v99 = fmul float %v49, %v34
+  %v100 = fadd float %v51, %v99
+  %v101 = add nsw i32 %v67, 16
+  %v102 = getelementptr inbounds [400 x float], [400 x float]* %v0, i32 0, i32 %v101
+  %v103 = load float, float* %v102, align 4, !tbaa !0
+  %v104 = fmul float %v49, %v103
+  %v105 = fadd float %v50, %v104
+  %v106 = icmp eq i32 %v70, 384
+  br i1 %v106, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  store float %v69, float* %a1, align 4
+  store float %v72, float* %v1, align 4
+  store float %v74, float* %v2, align 4
+  store float %v76, float* %v3, align 4
+  store float %v78, float* %v4, align 4
+  store float %v80, float* %v5, align 4
+  store float %v82, float* %v6, align 4
+  store float %v84, float* %v7, align 4
+  store float %v86, float* %v8, align 4
+  store float %v88, float* %v9, align 4
+  store float %v90, float* %v10, align 4
+  store float %v92, float* %v11, align 4
+  store float %v94, float* %v12, align 4
+  store float %v96, float* %v13, align 4
+  store float %v98, float* %v14, align 4
+  store float %v100, float* %v15, align 4
+  store float %v105, float* %v16, align 4
+  %v107 = fcmp olt float %v69, 1.000000e+00
+  br i1 %v107, label %b3, label %b4
+
+b3:                                               ; preds = %b2
+  store float 1.000000e+00, float* %a1, align 4, !tbaa !0
+  br label %b4
+
+b4:                                               ; preds = %b3, %b2
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"float", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-physreg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-physreg.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-physreg.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-physreg.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,48 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; Make sure the pipeliner handles physical registers (e.g., those used in
+; inline asm).
+
+ at g0 = external global i32*, align 4
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0, i8** nocapture %a1) #0 {
+b0:
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  unreachable
+
+b2:                                               ; preds = %b0
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  br i1 undef, label %b4, label %b3
+
+b4:                                               ; preds = %b3
+  br label %b5
+
+b5:                                               ; preds = %b5, %b4
+  %v0 = phi i32* [ inttoptr (i32 33554432 to i32*), %b4 ], [ %v4, %b5 ]
+  %v1 = phi i32 [ 0, %b4 ], [ %v5, %b5 ]
+  %v2 = ptrtoint i32* %v0 to i32
+  tail call void asm sideeffect "    r1 = $1\0A    r0 = $0\0A    memw(r0) = r1\0A    dcfetch(r0)\0A", "r,r,~{r0},~{r1}"(i32 %v2, i32 %v1) #0
+  %v3 = load i32*, i32** @g0, align 4
+  %v4 = getelementptr inbounds i32, i32* %v3, i32 1
+  store i32* %v4, i32** @g0, align 4
+  %v5 = add nsw i32 %v1, 1
+  %v6 = icmp eq i32 %v5, 200
+  br i1 %v6, label %b6, label %b5
+
+b6:                                               ; preds = %b5
+  br label %b7
+
+b7:                                               ; preds = %b7, %b6
+  br i1 undef, label %b8, label %b7
+
+b8:                                               ; preds = %b7
+  ret i32 0
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-prolog-phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-prolog-phi.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-prolog-phi.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-prolog-phi.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,55 @@
+; RUN: llc -march=hexagon -rdf-opt=0 < %s | FileCheck %s
+
+; Test that we generate the correct name for a value in a prolog block. The
+; pipeliner was using an incorrect value for an instruction in the 2nd prolog
+; block for a value defined by a Phi. The result was that instructions in
+; the 1st and 2nd prolog blocks contained the same operands.
+
+; CHECK: vcmp.gt([[VREG:(v[0-9]+)]].uh,v{{[0-9]+}}.uh)
+; CHECK-NOT: vcmp.gt([[VREG]].uh,v{{[0-9]+}}.uh)
+; CHECK: loop0
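+; (The CHECK-NOT above ensures the vcmp.gt of [[VREG]] does not reappear before
+; the loop0 instruction, i.e. the two prolog blocks do not repeat the same
+; compare.)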
+
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b1, label %b5
+
+b1:                                               ; preds = %b0
+  %v0 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> undef)
+  br label %b2
+
+b2:                                               ; preds = %b4, %b1
+  %v1 = phi <32 x i32> [ undef, %b1 ], [ %v7, %b4 ]
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v2 = phi i32 [ 0, %b2 ], [ %v8, %b3 ]
+  %v3 = phi <32 x i32> [ zeroinitializer, %b2 ], [ %v0, %b3 ]
+  %v4 = phi <32 x i32> [ %v1, %b2 ], [ %v7, %b3 ]
+  %v5 = tail call <1024 x i1> @llvm.hexagon.V6.vgtuh.128B(<32 x i32> %v3, <32 x i32> undef)
+  %v6 = tail call <1024 x i1> @llvm.hexagon.V6.veqh.and.128B(<1024 x i1> %v5, <32 x i32> undef, <32 x i32> undef)
+  %v7 = tail call <32 x i32> @llvm.hexagon.V6.vaddhq.128B(<1024 x i1> %v6, <32 x i32> %v4, <32 x i32> undef)
+  %v8 = add nsw i32 %v2, 1
+  %v9 = icmp slt i32 %v8, undef
+  br i1 %v9, label %b3, label %b4
+
+b4:                                               ; preds = %b3
+  br i1 undef, label %b5, label %b2
+
+b5:                                               ; preds = %b4, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <1024 x i1> @llvm.hexagon.V6.vgtuh.128B(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <1024 x i1> @llvm.hexagon.V6.veqh.and.128B(<1024 x i1>, <32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vaddhq.128B(<1024 x i1>, <32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-regseq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-regseq.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-regseq.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-regseq.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,24 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+%s.0 = type { i64 }
+
+define i64 @f0(%s.0* nocapture %a0, i32 %a1) {
+b0:
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i32 [ %v6, %b1 ], [ 0, %b0 ]
+  %v1 = phi i64 [ %v5, %b1 ], [ undef, %b0 ]
+  %v2 = load i16, i16* undef, align 2
+  %v3 = zext i16 %v2 to i64
+  %v4 = and i64 %v1, -4294967296
+  %v5 = or i64 %v3, %v4
+  %v6 = add nsw i32 %v0, 1
+  %v7 = icmp eq i32 %v6, %a1
+  br i1 %v7, label %b2, label %b1
+
+b2:                                               ; preds = %b1, %b0
+  %v8 = phi i64 [ undef, %b0 ], [ %v5, %b1 ]
+  ret i64 %v8
+}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-remove-dep-ice.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-remove-dep-ice.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-remove-dep-ice.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-remove-dep-ice.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,35 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; Test that the pipeliner doesn't ICE in the ScheduleDAG code because
+; the latency values are not updated properly. The pipeliner should
+; not change the latency of chain edges.
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  %v0 = alloca [10 x i16], align 8
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v1 = phi i32 [ %v7, %b1 ], [ undef, %b0 ]
+  %v2 = add i32 %v1, -1
+  %v3 = getelementptr inbounds [10 x i16], [10 x i16]* %v0, i32 0, i32 %v2
+  %v4 = add i32 %v1, -2
+  %v5 = getelementptr inbounds [10 x i16], [10 x i16]* %v0, i32 0, i32 %v4
+  %v6 = load i16, i16* %v5, align 2, !tbaa !0
+  store i16 %v6, i16* %v3, align 2, !tbaa !0
+  %v7 = add i32 %v1, -4
+  %v8 = icmp sgt i32 %v7, 3
+  br i1 %v8, label %b1, label %b2
+
+b2:                                               ; preds = %b2, %b1
+  br label %b2
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-rename-dead-phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-rename-dead-phi.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-rename-dead-phi.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-rename-dead-phi.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,68 @@
+; RUN: llc -march=hexagon -fp-contract=fast -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; Pipelining can eliminate the need for a Phi if the loop-carried use
+; is scheduled first. We need to rename register uses of the Phi
+; that may occur after the loop.
+
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b1, label %b12
+
+b1:                                               ; preds = %b0
+  %v0 = load float, float* undef, align 4
+  br i1 undef, label %b2, label %b5
+
+b2:                                               ; preds = %b1
+  br i1 undef, label %b3, label %b4
+
+b3:                                               ; preds = %b3, %b2
+  br label %b3
+
+b4:                                               ; preds = %b4, %b2
+  br i1 undef, label %b5, label %b4
+
+b5:                                               ; preds = %b4, %b1
+  br i1 undef, label %b6, label %b9
+
+b6:                                               ; preds = %b5
+  br i1 undef, label %b7, label %b8
+
+b7:                                               ; preds = %b7, %b6
+  br label %b7
+
+b8:                                               ; preds = %b8, %b6
+  %v1 = phi i32 [ %v7, %b8 ], [ 2, %b6 ]
+  %v2 = phi float [ %v6, %b8 ], [ undef, %b6 ]
+  %v3 = phi float [ %v2, %b8 ], [ undef, %b6 ]
+  %v4 = fmul float undef, %v2
+  %v5 = fsub float %v4, %v3
+  %v6 = fadd float %v5, undef
+  %v7 = add nsw i32 %v1, 1
+  %v8 = icmp eq i32 %v7, undef
+  br i1 %v8, label %b9, label %b8
+
+b9:                                               ; preds = %b8, %b5
+  %v9 = phi float [ undef, %b5 ], [ %v2, %b8 ]
+  %v10 = fsub float 0.000000e+00, %v9
+  %v11 = fadd float %v10, undef
+  %v12 = fmul float undef, %v11
+  %v13 = fcmp ugt float %v12, 0.000000e+00
+  br i1 %v13, label %b10, label %b11
+
+b10:                                              ; preds = %b9
+  br label %b11
+
+b11:                                              ; preds = %b10, %b9
+  %v14 = phi float [ undef, %b10 ], [ %v0, %b9 ]
+  %v15 = fadd float undef, %v14
+  br label %b13
+
+b12:                                              ; preds = %b0
+  ret void
+
+b13:                                              ; preds = %b13, %b11
+  br label %b13
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-replace-uses1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-replace-uses1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-replace-uses1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-replace-uses1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,59 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0) #0 {
+b0:
+  %v0 = ashr i32 %a0, 1
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v1 = phi i32 [ %v17, %b1 ], [ undef, %b0 ]
+  %v2 = phi i32 [ %v19, %b1 ], [ 0, %b0 ]
+  %v3 = phi i32 [ %v4, %b1 ], [ undef, %b0 ]
+  %v4 = phi i32 [ %v14, %b1 ], [ undef, %b0 ]
+  %v5 = phi i32 [ %v18, %b1 ], [ undef, %b0 ]
+  %v6 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v1, i32 undef)
+  %v7 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v3, i32 %v3)
+  %v8 = tail call i64 @llvm.hexagon.S2.valignib(i64 %v6, i64 undef, i32 2)
+  %v9 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 undef, i64 %v7, i64 %v8)
+  %v10 = inttoptr i32 %v5 to i16*
+  %v11 = load i16, i16* %v10, align 2
+  %v12 = sext i16 %v11 to i32
+  %v13 = add nsw i32 %v5, -8
+  %v14 = tail call i32 @llvm.hexagon.A2.combine.ll(i32 %v12, i32 %v1)
+  %v15 = inttoptr i32 %v13 to i16*
+  %v16 = load i16, i16* %v15, align 2
+  %v17 = sext i16 %v16 to i32
+  %v18 = add nsw i32 %v5, -16
+  %v19 = add nsw i32 %v2, 1
+  %v20 = icmp eq i32 %v19, %v0
+  br i1 %v20, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  %v21 = phi i64 [ %v9, %b1 ]
+  %v22 = trunc i64 %v21 to i32
+  %v23 = bitcast i8* undef to i32*
+  store i32 %v22, i32* %v23, align 4
+  call void @llvm.trap()
+  unreachable
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.combine.ll(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.A2.combinew(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M2.vdmacs.s0(i64, i64, i64) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.valignib(i64, i64, i32) #1
+
+; Function Attrs: noreturn nounwind
+declare void @llvm.trap() #2
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { noreturn nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-resmii.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-resmii.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-resmii.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-resmii.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,43 @@
+; RUN: llc -disable-lsr -march=hexagon -enable-pipeliner  \
+; RUN:     -debug-only=pipeliner < %s -o - 2>&1 > /dev/null | FileCheck %s
+; REQUIRES: asserts
+;
+; Test that checks if the ResMII is 1.
+
+; CHECK: MII = 1 (rec=1, res=1)
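+; (Note: the pipeliner computes MII as the maximum of the recurrence (rec) and
+; resource (res) lower bounds, so rec=1 and res=1 yield MII = 1.)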
+
+; Function Attrs: nounwind
+define void @f0(i32* nocapture %a0, i32 %a1) #0 {
+b0:
+  %v0 = icmp sgt i32 %a1, 1
+  br i1 %v0, label %b1, label %b4
+
+b1:                                               ; preds = %b0
+  %v1 = load i32, i32* %a0, align 4
+  %v2 = add i32 %v1, 10
+  %v3 = getelementptr i32, i32* %a0, i32 1
+  %v4 = add i32 %a1, -1
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v5 = phi i32 [ %v12, %b2 ], [ %v4, %b1 ]
+  %v6 = phi i32* [ %v11, %b2 ], [ %v3, %b1 ]
+  %v7 = phi i32 [ %v10, %b2 ], [ %v2, %b1 ]
+  store i32 %v7, i32* %v6, align 4
+  %v8 = add i32 %v7, 10
+  %v9 = getelementptr i32, i32* %v6, i32 -1
+  store i32 %v8, i32* %v9, align 4
+  %v10 = add i32 %v7, 10
+  %v11 = getelementptr i32, i32* %v6, i32 1
+  %v12 = add i32 %v5, -1
+  %v13 = icmp eq i32 %v12, 0
+  br i1 %v13, label %b3, label %b2
+
+b3:                                               ; preds = %b2
+  br label %b4
+
+b4:                                               ; preds = %b3, %b0
+  ret void
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,44 @@
+; RUN: llc  -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b1, label %b3
+
+b1:                                               ; preds = %b0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v0 = phi i32 [ 0, %b1 ], [ %v9, %b2 ]
+  %v1 = phi <16 x i32> [ undef, %b1 ], [ %v2, %b2 ]
+  %v2 = phi <16 x i32> [ undef, %b1 ], [ %v4, %b2 ]
+  %v3 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v2, <16 x i32> %v1, i32 62)
+  %v4 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> undef, <16 x i32> zeroinitializer)
+  %v5 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v4, <16 x i32> %v2, i32 2)
+  %v6 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> undef, <16 x i32> %v3)
+  %v7 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v6, <16 x i32> %v5)
+  %v8 = tail call <16 x i32> @llvm.hexagon.V6.vabsh(<16 x i32> %v7)
+  store <16 x i32> %v8, <16 x i32>* undef, align 64
+  %v9 = add nsw i32 %v0, 1
+  %v10 = icmp slt i32 %v9, undef
+  br i1 %v10, label %b2, label %b3
+
+b3:                                               ; preds = %b2, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vabsh(<16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-2.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-2.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-2.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,52 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; Test that causes an assert when the phi reuse code does not set
+; PhiOp2 correctly for use in the next stage. This occurs when the
+; number of stages is two or more.
+
+; Function Attrs: nounwind
+define void @f0(i16* noalias nocapture %a0) #0 {
+b0:
+  br i1 undef, label %b1, label %b3
+
+b1:                                               ; preds = %b0
+  %v0 = bitcast i16* %a0 to <16 x i32>*
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v1 = phi i32 [ 0, %b1 ], [ %v15, %b2 ]
+  %v2 = phi <16 x i32>* [ %v0, %b1 ], [ %v14, %b2 ]
+  %v3 = phi <16 x i32>* [ undef, %b1 ], [ %v6, %b2 ]
+  %v4 = phi <16 x i32> [ undef, %b1 ], [ %v10, %b2 ]
+  %v5 = phi <16 x i32> [ undef, %b1 ], [ %v4, %b2 ]
+  %v6 = getelementptr inbounds <16 x i32>, <16 x i32>* %v3, i32 1
+  %v7 = load <16 x i32>, <16 x i32>* %v3, align 64
+  %v8 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> undef, <16 x i32> %v7)
+  %v9 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v4, <16 x i32> %v5, i32 62)
+  %v10 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v8, <16 x i32> undef)
+  %v11 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v10, <16 x i32> %v4, i32 2)
+  %v12 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffh(<16 x i32> %v9, <16 x i32> %v11)
+  %v13 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 1
+  store <16 x i32> %v12, <16 x i32>* %v2, align 64
+  %v14 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 2
+  store <16 x i32> zeroinitializer, <16 x i32>* %v13, align 64
+  %v15 = add nsw i32 %v1, 1
+  %v16 = icmp slt i32 %v15, undef
+  br i1 %v16, label %b2, label %b3
+
+b3:                                               ; preds = %b2, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vabsdiffh(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-4.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-4.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-4.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,169 @@
+; RUN: llc -march=hexagon -O2 < %s
+; REQUIRES: asserts
+
+; Test that we generate the correct Phi names in the epilog when we need
+; to reuse an existing Phi. This bug caused an assert in live variable
+; analysis because the wrong virtual register was used.
+; The bug occurs when a Phi references another Phi, and the referent Phi
+; value is used in multiple stages. When this occurs, the referring Phi
+; can reuse one of the new values. We have code that deals with this in the
+; kernel, but this case can occur in the epilog too.
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32>, <32 x i32>, i32) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>, <16 x i32>, i32) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32>, <16 x i32>, i32) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32>, <16 x i32>) #0
+
+; Function Attrs: nounwind
+define void @f0(i8* noalias nocapture readonly %a0, i32 %a1, i32 %a2) #1 {
+b0:
+  %v0 = mul nsw i32 %a1, 2
+  br i1 undef, label %b1, label %b5
+
+b1:                                               ; preds = %b0
+  %v1 = getelementptr inbounds i8, i8* %a0, i32 %v0
+  %v2 = icmp sgt i32 %a2, 64
+  %v3 = add i32 %v0, 64
+  %v4 = add i32 %a1, 64
+  %v5 = sub i32 64, %a1
+  %v6 = sub i32 64, %v0
+  br i1 %v2, label %b2, label %b4
+
+b2:                                               ; preds = %b1
+  %v7 = getelementptr inbounds i8, i8* %v1, i32 %v3
+  %v8 = getelementptr inbounds i8, i8* %v1, i32 %v4
+  %v9 = getelementptr inbounds i8, i8* %v1, i32 64
+  %v10 = getelementptr inbounds i8, i8* %v1, i32 %v5
+  %v11 = getelementptr inbounds i8, i8* %v1, i32 %v6
+  %v12 = bitcast i8* %v7 to <16 x i32>*
+  %v13 = bitcast i8* %v8 to <16 x i32>*
+  %v14 = bitcast i8* %v9 to <16 x i32>*
+  %v15 = bitcast i8* %v10 to <16 x i32>*
+  %v16 = bitcast i8* %v11 to <16 x i32>*
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v17 = phi <16 x i32>* [ null, %b2 ], [ %v52, %b3 ]
+  %v18 = phi <16 x i32>* [ %v12, %b2 ], [ %v34, %b3 ]
+  %v19 = phi <16 x i32>* [ %v13, %b2 ], [ %v32, %b3 ]
+  %v20 = phi <16 x i32>* [ %v14, %b2 ], [ %v30, %b3 ]
+  %v21 = phi <16 x i32>* [ %v15, %b2 ], [ %v28, %b3 ]
+  %v22 = phi <16 x i32>* [ %v16, %b2 ], [ %v26, %b3 ]
+  %v23 = phi <32 x i32> [ undef, %b2 ], [ %v37, %b3 ]
+  %v24 = phi <32 x i32> [ zeroinitializer, %b2 ], [ %v23, %b3 ]
+  %v25 = phi i32 [ %a2, %b2 ], [ %v53, %b3 ]
+  %v26 = getelementptr inbounds <16 x i32>, <16 x i32>* %v22, i32 1
+  %v27 = load <16 x i32>, <16 x i32>* %v22, align 64
+  %v28 = getelementptr inbounds <16 x i32>, <16 x i32>* %v21, i32 1
+  %v29 = load <16 x i32>, <16 x i32>* %v21, align 64
+  %v30 = getelementptr inbounds <16 x i32>, <16 x i32>* %v20, i32 1
+  %v31 = load <16 x i32>, <16 x i32>* %v20, align 64
+  %v32 = getelementptr inbounds <16 x i32>, <16 x i32>* %v19, i32 1
+  %v33 = load <16 x i32>, <16 x i32>* %v19, align 64
+  %v34 = getelementptr inbounds <16 x i32>, <16 x i32>* %v18, i32 1
+  %v35 = load <16 x i32>, <16 x i32>* %v18, align 64
+  %v36 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v33, <16 x i32> %v29) #3
+  %v37 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> undef, <32 x i32> %v36, i32 67372036) #3
+  %v38 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v23) #3
+  %v39 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v24) #3
+  %v40 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v38, <16 x i32> %v39, i32 2) #3
+  %v41 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v37) #3
+  %v42 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v41, <16 x i32> %v38, i32 2) #3
+  %v43 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v37) #3
+  %v44 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v43, <16 x i32> undef, i32 2) #3
+  %v45 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v40, <16 x i32> %v42) #3
+  %v46 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %v45, <16 x i32> %v38, i32 101058054) #3
+  %v47 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %v46, <16 x i32> zeroinitializer, i32 67372036) #3
+  %v48 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> undef, <16 x i32> %v44) #3
+  %v49 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %v48, <16 x i32> undef, i32 101058054) #3
+  %v50 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %v49, <16 x i32> zeroinitializer, i32 67372036) #3
+  %v51 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> %v50, <16 x i32> %v47) #3
+  %v52 = getelementptr inbounds <16 x i32>, <16 x i32>* %v17, i32 1
+  store <16 x i32> %v51, <16 x i32>* %v17, align 64
+  %v53 = add nsw i32 %v25, -64
+  %v54 = icmp sgt i32 %v53, 64
+  br i1 %v54, label %b3, label %b4
+
+b4:                                               ; preds = %b3, %b1
+  unreachable
+
+b5:                                               ; preds = %b0
+  ret void
+}
+
+; Function Attrs: nounwind
+define void @f1(i32 %a0, i32* %a1) #1 {
+b0:
+  %v0 = ptrtoint i32* %a1 to i32
+  %v1 = ashr i32 %a0, 1
+  %v2 = tail call i32 @llvm.hexagon.A2.combine.ll(i32 undef, i32 undef)
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b1, %b0
+  br i1 undef, label %b1, label %b2
+
+b2:                                               ; preds = %b2, %b1, %b0
+  %v3 = phi i64 [ %v11, %b2 ], [ undef, %b0 ], [ undef, %b1 ]
+  %v4 = phi i32 [ %v12, %b2 ], [ 0, %b0 ], [ undef, %b1 ]
+  %v5 = phi i32 [ %v6, %b2 ], [ %v2, %b0 ], [ undef, %b1 ]
+  %v6 = phi i32 [ %v10, %b2 ], [ undef, %b0 ], [ undef, %b1 ]
+  %v7 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 undef, i64 %v3, i64 undef)
+  %v8 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v5, i32 %v5)
+  %v9 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 undef, i64 %v8, i64 undef)
+  %v10 = tail call i32 @llvm.hexagon.A2.combine.ll(i32 0, i32 undef)
+  %v11 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v10, i32 %v6)
+  %v12 = add nsw i32 %v4, 1
+  %v13 = icmp eq i32 %v12, %v1
+  br i1 %v13, label %b3, label %b2
+
+b3:                                               ; preds = %b2
+  %v14 = phi i64 [ %v9, %b2 ]
+  %v15 = phi i64 [ %v7, %b2 ]
+  %v16 = trunc i64 %v14 to i32
+  %v17 = trunc i64 %v15 to i32
+  %v18 = inttoptr i32 %v0 to i32*
+  store i32 %v17, i32* %v18, align 4
+  %v19 = bitcast i8* undef to i32*
+  store i32 %v16, i32* %v19, align 4
+  call void @llvm.trap()
+  unreachable
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.combine.ll(i32, i32) #0
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.A2.combinew(i32, i32) #0
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M2.vdmacs.s0(i64, i64, i64) #0
+
+; Function Attrs: noreturn nounwind
+declare void @llvm.trap() #2
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #2 = { noreturn nounwind }
+attributes #3 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,34 @@
+; RUN: llc -march=hexagon -enable-pipeliner -fp-contract=fast < %s
+; REQUIRES: asserts
+
+; Test that the code which reuses existing Phis works when the Phis are used
+; in multiple stages. In this case, one can be reused, but the other must be
+; generated.
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0) #0 {
+b0:
+  %v0 = icmp sgt i32 %a0, 0
+  br i1 %v0, label %b1, label %b2
+
+b1:                                               ; preds = %b1, %b0
+  %v1 = phi i32 [ %v11, %b1 ], [ 0, %b0 ]
+  %v2 = phi float [ %v4, %b1 ], [ undef, %b0 ]
+  %v3 = phi float [ %v2, %b1 ], [ undef, %b0 ]
+  %v4 = load float, float* undef, align 4
+  %v5 = fmul float %v4, 0x3FEFAA0000000000
+  %v6 = fadd float undef, %v5
+  %v7 = fmul float %v2, 0xBFFFAA0000000000
+  %v8 = fadd float %v7, %v6
+  %v9 = fmul float %v3, 0x3FEFAA0000000000
+  %v10 = fadd float %v9, %v8
+  store float %v10, float* undef, align 4
+  %v11 = add nsw i32 %v1, 1
+  %v12 = icmp eq i32 %v11, %a0
+  br i1 %v12, label %b2, label %b1
+
+b2:                                               ; preds = %b1, %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-stages.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-stages.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-stages.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-stages.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,77 @@
+; RUN: llc -march=hexagon -enable-aa-sched-mi -enable-pipeliner \
+; RUN:     -hexagon-expand-condsets=0 -pipeliner-max-stages=2 < %s
+; REQUIRES: asserts
+
+; Disable expand-condsets because it will assert on undefined registers.
+
+; Test that we generate pipelines with multiple stages correctly.
+
+%s.0 = type { [194 x i32], i32*, [10 x i32], [10 x i32], i32, i32, i32, i32, i32, [9 x i32], [9 x i32], i16, i16, i16, i16, %s.1*, %s.2*, %s.3*, %s.4*, %s.5*, %s.6*, %s.7*, %s.8*, %s.9* }
+%s.1 = type { [60 x i32], i16 }
+%s.2 = type { i32, [7 x i32], i16 }
+%s.3 = type { [10 x i32] }
+%s.4 = type { [10 x i32], [10 x i32] }
+%s.5 = type { [5 x i32], i32, i32 }
+%s.6 = type { [5 x i32], i32, i32 }
+%s.7 = type { [4 x i32], [4 x i32] }
+%s.8 = type { [5 x i32], i32, i32, i16, i16 }
+%s.9 = type { i8, i32, i32, i32, [10 x i32], [10 x i32], [80 x i32], [80 x i32], [8 x i32], i32, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16 }
+
+; Function Attrs: nounwind
+define fastcc void @f0(%s.0* %a0) #0 {
+b0:
+  %v0 = alloca [40 x i32], align 8
+  %v1 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 5
+  %v2 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 6
+  %v3 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 4
+  %v4 = select i1 undef, i32* %v2, i32* %v1
+  %v5 = load i32, i32* %v4, align 4
+  br i1 false, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  %v6 = load i32, i32* %v3, align 4
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v7 = phi i32 [ %v6, %b1 ], [ undef, %b0 ]
+  %v8 = shl i32 %v7, 1
+  br i1 undef, label %b3, label %b4
+
+b3:                                               ; preds = %b3, %b2
+  %v9 = phi i32 [ %v34, %b3 ], [ %v5, %b2 ]
+  %v10 = add nsw i32 %v9, 2
+  %v11 = getelementptr inbounds [40 x i32], [40 x i32]* %v0, i32 0, i32 undef
+  %v12 = load i32, i32* %v11, align 4
+  %v13 = mul nsw i32 %v12, %v8
+  %v14 = ashr i32 %v13, 15
+  %v15 = getelementptr inbounds [40 x i32], [40 x i32]* %v0, i32 0, i32 %v10
+  %v16 = add nsw i32 %v14, 0
+  store i32 %v16, i32* %v15, align 4
+  %v17 = add nsw i32 %v9, 3
+  %v18 = sub nsw i32 %v17, %v5
+  %v19 = getelementptr inbounds [40 x i32], [40 x i32]* %v0, i32 0, i32 %v18
+  %v20 = load i32, i32* %v19, align 4
+  %v21 = mul nsw i32 %v20, %v8
+  %v22 = ashr i32 %v21, 15
+  %v23 = getelementptr inbounds [40 x i32], [40 x i32]* %v0, i32 0, i32 %v17
+  %v24 = add nsw i32 %v22, 0
+  store i32 %v24, i32* %v23, align 4
+  %v25 = add nsw i32 %v9, 6
+  %v26 = sub nsw i32 %v25, %v5
+  %v27 = getelementptr inbounds [40 x i32], [40 x i32]* %v0, i32 0, i32 %v26
+  %v28 = load i32, i32* %v27, align 4
+  %v29 = mul nsw i32 %v28, %v8
+  %v30 = ashr i32 %v29, 15
+  %v31 = getelementptr inbounds [40 x i32], [40 x i32]* %v0, i32 0, i32 %v25
+  %v32 = load i32, i32* %v31, align 4
+  %v33 = add nsw i32 %v30, %v32
+  store i32 %v33, i32* %v31, align 4
+  %v34 = add nsw i32 %v9, 8
+  %v35 = icmp slt i32 %v34, 33
+  br i1 %v35, label %b3, label %b4
+
+b4:                                               ; preds = %b4, %b3, %b2
+  br label %b4
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-stages3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-stages3.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-stages3.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-stages3.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,65 @@
+; RUN: llc -march=hexagon -enable-pipeliner -pipeliner-max-stages=2 < %s
+; REQUIRES: asserts
+
+; Test that the compiler doesn't seg fault due to incorrect names in the epilog.
+
+; Function Attrs: nounwind
+define void @f0(i16* nocapture %a0, i16* nocapture %a1, i16 signext %a2) #0 {
+b0:
+  %v0 = icmp sgt i16 %a2, 0
+  br i1 %v0, label %b1, label %b3
+
+b1:                                               ; preds = %b0
+  %v1 = sext i16 %a2 to i32
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v2 = phi i16 [ %v16, %b2 ], [ undef, %b1 ]
+  %v3 = phi i32 [ %v17, %b2 ], [ 0, %b1 ]
+  %v4 = phi i16* [ undef, %b2 ], [ %a0, %b1 ]
+  %v5 = phi i16* [ %v6, %b2 ], [ %a1, %b1 ]
+  %v6 = getelementptr inbounds i16, i16* %v5, i32 1
+  %v7 = load i16, i16* %v5, align 2, !tbaa !0
+  %v8 = sext i16 %v7 to i32
+  %v9 = tail call i32 @llvm.hexagon.A2.aslh(i32 %v8)
+  %v10 = tail call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v9, i32 undef)
+  %v11 = sext i16 %v2 to i32
+  %v12 = tail call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32 %v10, i32 %v11, i32 undef)
+  %v13 = tail call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %v12, i32 undef)
+  %v14 = tail call i32 @llvm.hexagon.A2.addsat(i32 %v13, i32 32768)
+  %v15 = tail call i32 @llvm.hexagon.A2.asrh(i32 %v14)
+  %v16 = trunc i32 %v15 to i16
+  store i16 %v16, i16* %v4, align 2, !tbaa !0
+  %v17 = add i32 %v3, 1
+  %v18 = icmp eq i32 %v17, %v1
+  br i1 %v18, label %b3, label %b2
+
+b3:                                               ; preds = %b2, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.aslh(i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32, i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.asl.r.r.sat(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.asrh(i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.addsat(i32, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-subreg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-subreg.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-subreg.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-subreg.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,35 @@
+; RUN: llc -march=hexagon -enable-pipeliner -stats -o /dev/null < %s 2>&1 | FileCheck %s --check-prefix=STATS
+; REQUIRES: asserts
+
+; We're unable to pipeline a loop with a subreg as an operand of a Phi.
+
+; STATS-NOT: 1 pipeliner   - Number of loops software pipelined
+
+; Function Attrs: nounwind readnone
+define void @f0(i32 %a0) #0 {
+b0:
+  %v0 = sext i32 %a0 to i64
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v1 = phi i32 [ 805306368, %b0 ], [ %v12, %b1 ]
+  %v2 = phi i32 [ 5, %b0 ], [ %v13, %b1 ]
+  %v3 = sext i32 %v1 to i64
+  %v4 = mul nsw i64 %v3, %v0
+  %v5 = lshr i64 %v4, 32
+  %v6 = trunc i64 %v5 to i32
+  %v7 = sub nsw i32 536870912, %v6
+  %v8 = sext i32 %v7 to i64
+  %v9 = mul nsw i64 %v8, %v3
+  %v10 = lshr i64 %v9, 32
+  %v11 = shl nuw nsw i64 %v10, 4
+  %v12 = trunc i64 %v11 to i32
+  %v13 = add nsw i32 %v2, -1
+  %v14 = icmp eq i32 %v13, 0
+  br i1 %v14, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  ret void
+}
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/swp-swap.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-swap.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-swap.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-swap.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,50 @@
+; RUN: llc -march=hexagon -enable-pipeliner -stats -o /dev/null < %s 2>&1 | FileCheck %s --check-prefix=STATS
+; REQUIRES: asserts
+
+; Test that we don't incorrectly pipeline the swap operation.
+
+; STATS-NOT: 1 pipeliner   - Number of loops software pipelined
+
+ at g0 = common global i32* null, align 4
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0, i32 %a1, i32 %a2) #0 {
+b0:
+  %v0 = icmp sgt i32 %a2, 0
+  br i1 %v0, label %b1, label %b4
+
+b1:                                               ; preds = %b0
+  %v1 = load i32*, i32** @g0, align 4, !tbaa !0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v2 = phi i32 [ %a0, %b1 ], [ %v9, %b2 ]
+  %v3 = phi i32 [ %a2, %b1 ], [ %v11, %b2 ]
+  %v4 = phi i32 [ %a1, %b1 ], [ %v10, %b2 ]
+  %v5 = getelementptr inbounds i32, i32* %v1, i32 %v2
+  %v6 = load i32, i32* %v5, align 4, !tbaa !4
+  %v7 = getelementptr inbounds i32, i32* %v1, i32 %v4
+  %v8 = load i32, i32* %v7, align 4, !tbaa !4
+  store i32 %v8, i32* %v5, align 4, !tbaa !4
+  store i32 %v6, i32* %v7, align 4, !tbaa !4
+  %v9 = add nsw i32 %v2, 1
+  %v10 = add nsw i32 %v4, 1
+  %v11 = add nsw i32 %v3, -1
+  %v12 = icmp sgt i32 %v11, 0
+  br i1 %v12, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  br label %b4
+
+b4:                                               ; preds = %b3, %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"any pointer", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"int", !2, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/swp-tfri.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-tfri.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/swp-tfri.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/swp-tfri.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,52 @@
+; RUN: llc -march=hexagon -enable-pipeliner -stats -o /dev/null < %s 2>&1 | FileCheck %s --check-prefix=STATS
+; REQUIRES: asserts
+
+; Check that we handle the case when a value is first defined in the loop.
+
+; STATS: 1 pipeliner        - Number of loops software pipelined
+
+; Function Attrs: nounwind
+define fastcc void @f0() #0 {
+b0:
+  br i1 undef, label %b7, label %b1
+
+b1:                                               ; preds = %b0
+  br i1 undef, label %b2, label %b4
+
+b2:                                               ; preds = %b1
+  %v0 = load i16, i16* undef, align 2
+  %v1 = load i16, i16* undef, align 2
+  br i1 undef, label %b5, label %b3
+
+b3:                                               ; preds = %b5, %b2
+  %v2 = phi i16 [ 0, %b2 ], [ %v14, %b5 ]
+  br label %b4
+
+b4:                                               ; preds = %b3, %b1
+  br i1 undef, label %b7, label %b6
+
+b5:                                               ; preds = %b5, %b2
+  %v3 = phi i16 [ %v5, %b5 ], [ undef, %b2 ]
+  %v4 = phi i16 [ 0, %b5 ], [ %v1, %b2 ]
+  %v5 = phi i16 [ 0, %b5 ], [ %v0, %b2 ]
+  %v6 = phi i16 [ %v4, %b5 ], [ undef, %b2 ]
+  %v7 = phi i16 [ %v14, %b5 ], [ 0, %b2 ]
+  %v8 = phi i32 [ %v15, %b5 ], [ undef, %b2 ]
+  %v9 = or i16 0, %v7
+  %v10 = lshr i16 %v3, 8
+  %v11 = lshr i16 %v6, 8
+  %v12 = or i16 %v11, %v9
+  %v13 = or i16 0, %v12
+  %v14 = or i16 %v10, %v13
+  %v15 = add nsw i32 %v8, -32
+  %v16 = icmp sgt i32 %v15, 31
+  br i1 %v16, label %b5, label %b3
+
+b6:                                               ; preds = %b4
+  br label %b7
+
+b7:                                               ; preds = %b6, %b4, %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/tcm-zext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/tcm-zext.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/tcm-zext.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/tcm-zext.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,25 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: jump f2
+;
+; Check that we perform tail-call merging when the return type is zero-extended.
+; We want to see a jump to f2, not a call.
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define zeroext i8 @f0() #0 {
+b0:
+  %v0 = tail call zeroext i8 @f2() #0
+  ret i8 %v0
+}
+
+; Function Attrs: nounwind readnone
+define zeroext i8 @f1() #1 {
+b0:
+  ret i8 1
+}
+
+declare zeroext i8 @f2()
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
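
For context, the IR in tcm-zext.ll above corresponds roughly to the C sketch below (illustrative only, not part of the commit): a function that merely forwards the zero-extended return value of another function is in tail position, so it should lower to a direct jump rather than a call.

/* Illustrative C source for the tcm-zext test (hypothetical, not from the
   commit).  With tail-call optimization, f0 lowers to "jump f2" instead of
   a call followed by a return. */
extern unsigned char f2(void);

unsigned char f0(void) {
  return f2();  /* tail position; the zeroext return value passes through */
}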

Added: llvm/trunk/test/CodeGen/Hexagon/testbits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/testbits.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/testbits.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/testbits.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,33 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; CHECK-LABEL: f0:
+; CHECK: p0 = bitsset(r0,r1)
+define i32 @f0(i32 %a0, i32 %a1) #0 {
+b0:
+  %v0 = and i32 %a0, %a1
+  %v1 = icmp eq i32 %v0, %a1
+  %v2 = select i1 %v1, i32 2, i32 3
+  ret i32 %v2
+}
+
+; CHECK-LABEL: f1:
+; CHECK: p0 = bitsclr(r0,r1)
+define i32 @f1(i32 %a0, i32 %a1) #0 {
+b0:
+  %v0 = and i32 %a0, %a1
+  %v1 = icmp eq i32 %v0, 0
+  %v2 = select i1 %v1, i32 2, i32 3
+  ret i32 %v2
+}
+
+; CHECK-LABEL: f2:
+; CHECK: p0 = bitsclr(r0,#37)
+define i32 @f2(i32 %a0) #0 {
+b0:
+  %v0 = and i32 %a0, 37
+  %v1 = icmp eq i32 %v0, 0
+  %v2 = select i1 %v1, i32 2, i32 3
+  ret i32 %v2
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/tfr-mux-nvj.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/tfr-mux-nvj.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/tfr-mux-nvj.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/tfr-mux-nvj.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,33 @@
+; RUN: llc -march=hexagon -O2 -hexagon-expand-condsets=0 < %s | FileCheck %s
+
+; CHECK: mux
+; CHECK: cmp{{.*\.new}}
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0, i32 %a1, i32 %a2) #0 {
+b0:
+  %v0 = icmp ne i32 %a0, 0
+  %v1 = add nsw i32 %a2, -1
+  %v2 = select i1 %v0, i32 10, i32 %v1
+  %v3 = icmp eq i32 %v2, %a2
+  br i1 %v3, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  %v4 = shl nsw i32 %a2, 1
+  %v5 = tail call i32 @f1(i32 %v4) #0
+  br label %b3
+
+b2:                                               ; preds = %b0
+  %v6 = tail call i32 @f1(i32 %a0) #0
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  %v7 = phi i32 [ %v5, %b1 ], [ %v6, %b2 ]
+  ret i32 %v7
+}
+
+declare i32 @f1(i32)
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/tied_oper.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/tied_oper.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/tied_oper.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/tied_oper.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,49 @@
+; RUN: llc -march=hexagon -O3 -verify-machineinstrs -disable-hexagon-peephole < %s
+; REQUIRES: asserts
+
+; This test checks if tied operands are consistent.
+target triple = "hexagon-unknown--elf"
+
+; Function Attrs: nounwind
+define void @f0(i16* nocapture %a0) #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b5, %b0
+  %v0 = phi i16* [ %a0, %b0 ], [ %v5, %b5 ]
+  %v1 = phi i16 [ undef, %b0 ], [ %v10, %b5 ]
+  br i1 undef, label %b2, label %b3
+
+b2:                                               ; preds = %b1
+  %v2 = getelementptr inbounds i16, i16* %v0, i32 1
+  %v3 = load i16, i16* %v0, align 2, !tbaa !0
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  %v4 = phi i16 [ %v3, %b2 ], [ %v1, %b1 ]
+  %v5 = phi i16* [ %v2, %b2 ], [ %v0, %b1 ]
+  %v6 = lshr i16 %v4, 4
+  %v7 = zext i16 %v6 to i32
+  %v8 = and i32 %v7, 15
+  %v9 = icmp ult i32 %v8, 9
+  br i1 %v9, label %b4, label %b5
+
+b4:                                               ; preds = %b3
+  call void @llvm.trap()
+  unreachable
+
+b5:                                               ; preds = %b3
+  %v10 = lshr i16 %v4, 8
+  br label %b1
+}
+
+; Function Attrs: noreturn nounwind
+declare void @llvm.trap() #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { noreturn nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/tls_gd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/tls_gd.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/tls_gd.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/tls_gd.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,25 @@
+; RUN: llc -march=hexagon -O2 -relocation-model=pic < %s | FileCheck %s
+; CHECK: add({{pc|PC}},##_GLOBAL_OFFSET_TABLE_@PCREL)
+; CHECK: call g1@GDPLT
+; CHECK: call g0@GDPLT
+
+@g0 = external thread_local global i32
+@g1 = external thread_local global i32
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = load i32, i32* @g1, align 4, !tbaa !0
+  store i32 %v0, i32* @g0, align 4, !tbaa !0
+  tail call void @f1(i32 %v0) #0
+  ret i32 0
+}
+
+declare void @f1(i32)
+
+attributes #0 = { nounwind "target-cpu"="hexagonv5" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/trivialmemaliascheck.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/trivialmemaliascheck.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/trivialmemaliascheck.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/trivialmemaliascheck.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,36 @@
+; RUN: llc -march=hexagon -enable-aa-sched-mi < %s | FileCheck %s
+
+; The two memory addresses in the load and the memop below are trivially
+; non-aliasing. However, there are some cases where the scheduler cannot
+; determine this - in this case, it is because of the use of memops, which,
+; on the surface, do not have only one mem operand. However, the backend knows
+; the MIs and can step in to help in some cases. In our case, if the base
+; registers are the same, the offsets differ, and the memory access size is
+; such that the two accesses cannot overlap, we can tell the scheduler that
+; there is no dependence due to aliasing between the two instructions.
+; In the example below, this allows the load to be packetized with the memop.
+; CHECK: {
+; CHECK:      r{{[0-9]*}} = memw(r{{[0-9]*}}+#4)
+; CHECK-NEXT: memw(r{{[0-9]*}}+#0) += #3
+; CHECK: }
+
+@g0 = common global [10 x i32] zeroinitializer, align 8
+
+; Function Attrs: nounwind
+define void @f0(i32* nocapture %a0) #0 {
+b0:
+  %v0 = load i32, i32* %a0, align 4, !tbaa !0
+  %v1 = add nsw i32 %v0, 3
+  store i32 %v1, i32* %a0, align 4, !tbaa !0
+  %v2 = getelementptr inbounds i32, i32* %a0, i32 1
+  %v3 = load i32, i32* %v2, align 4, !tbaa !0
+  store i32 %v3, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @g0, i32 0, i32 0), align 8, !tbaa !0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
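
The disjointness reasoning described in the comment of trivialmemaliascheck.ll above can be sketched in C as follows (the function name and signature are hypothetical, for illustration only, and are not the actual backend API): with a common base register, two fixed-size accesses are independent when their offset ranges do not intersect.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical sketch of the overlap test: accesses [off1, off1+size1) and
   [off2, off2+size2) off the same base register overlap only if each one
   starts before the other one ends. */
bool accesses_may_overlap(int64_t off1, uint64_t size1,
                          int64_t off2, uint64_t size2) {
  return off1 < off2 + (int64_t)size2 && off2 < off1 + (int64_t)size1;
}

/* For the test above: a 4-byte load at base+4 and a 4-byte memop at base+0
   give accesses_may_overlap(4, 4, 0, 4) == false, so no aliasing dependence
   is needed and the two instructions can be packetized together. */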

Added: llvm/trunk/test/CodeGen/Hexagon/trunc-mpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/trunc-mpy.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/trunc-mpy.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/trunc-mpy.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,88 @@
+; RUN: llc -march=hexagon -disable-hexagon-peephole < %s  | FileCheck %s
+
+; Test that we generate a 32-bit multiply-high instead of a 64-bit multiply
+; when only the high 32 bits of the result are used.
+
+; CHECK-LABEL: f0:
+; CHECK-NOT:  r{{[0-9]+}}:{{[0-9]+}} = mpy(
+define void @f0(i32* nocapture readonly %a0, i32* nocapture %a1) #0 {
+b0:
+  %v0 = getelementptr i32, i32* %a1, i32 448
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  br i1 undef, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  %v1 = getelementptr inbounds i32, i32* %a0, i32 64
+  %v2 = load i32, i32* %a0, align 4
+  %v3 = getelementptr inbounds i32, i32* %a0, i32 2
+  %v4 = load i32, i32* %v1, align 4
+  %v5 = sext i32 %v2 to i64
+  %v6 = sext i32 %v4 to i64
+  %v7 = mul nsw i64 %v6, %v5
+  %v8 = lshr i64 %v7, 32
+  %v9 = trunc i64 %v8 to i32
+  %v10 = sub nsw i32 0, %v9
+  %v11 = getelementptr inbounds i32, i32* %v0, i32 1
+  store i32 %v10, i32* %v1, align 4
+  ret void
+}
+
+; Similar to the above, but the operands of the multiply are expressions.
+
+; CHECK-LABEL: f1:
+; CHECK: r{{[0-9]+}} = mpy(
+define void @f1(i32 %a0, i32 %a1, i32* nocapture readonly %a2, i32* nocapture %a3) #0 {
+b0:
+  %v0 = getelementptr i32, i32* %a3, i32 448
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  br i1 undef, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  %v1 = getelementptr inbounds i32, i32* %a2, i32 64
+  %v2 = sext i32 %a0 to i64
+  %v3 = sext i32 %a1 to i64
+  %v4 = mul nsw i64 %v3, %v2
+  %v5 = lshr i64 %v4, 32
+  %v6 = trunc i64 %v5 to i32
+  %v7 = sub nsw i32 0, %v6
+  %v8 = getelementptr inbounds i32, i32* %v0, i32 1
+  store i32 %v7, i32* %v1, align 4
+  ret void
+}
+
+; Check that the transform occurs when the loads can be post-incremented.
+
+; CHECK-LABEL: f2:
+; CHECK: r{{[0-9]+}} = mpy(
+define void @f2(i32* nocapture readonly %a0, i32* nocapture %a1) #0 {
+b0:
+  %v0 = getelementptr i32, i32* %a1, i32 448
+  br label %b1
+
+b1:                                               ; preds = %b0
+  %v1 = getelementptr inbounds i32, i32* %a0, i32 64
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v2 = phi i32* [ %v0, %b1 ], [ %v14, %b2 ]
+  %v3 = phi i32* [ %v1, %b1 ], [ undef, %b2 ]
+  %v4 = phi i32* [ null, %b1 ], [ %v6, %b2 ]
+  %v5 = load i32, i32* %v4, align 4
+  %v6 = getelementptr inbounds i32, i32* %v4, i32 2
+  %v7 = load i32, i32* %v3, align 4
+  %v8 = sext i32 %v5 to i64
+  %v9 = sext i32 %v7 to i64
+  %v10 = mul nsw i64 %v9, %v8
+  %v11 = lshr i64 %v10, 32
+  %v12 = trunc i64 %v11 to i32
+  %v13 = sub nsw i32 0, %v12
+  %v14 = getelementptr inbounds i32, i32* %v2, i32 1
+  store i32 %v13, i32* %v2, align 4
+  br label %b2
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
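
In C terms, the pattern exercised by f0/f1/f2 above looks roughly like the sketch below (illustrative only): a 32x32 signed multiply whose 64-bit product is shifted right by 32 and truncated, so only the upper half of the product is consumed, which is what allows a single 32-bit multiply-high instruction to be used.

#include <stdint.h>

/* Illustrative source pattern: only the high 32 bits of the 64-bit product
   are used.  The sign-extends, the i64 multiply, the shift by 32, and the
   truncation match the IR in the tests above. */
int32_t mul_hi32(int32_t a, int32_t b) {
  int64_t prod = (int64_t)a * (int64_t)b;  /* sext + sext + mul nsw i64 */
  return (int32_t)(prod >> 32);            /* shift by 32 + trunc to i32 */
}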

Added: llvm/trunk/test/CodeGen/Hexagon/tstbit.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/tstbit.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/tstbit.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/tstbit.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,14 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: tstbit
+
+; Function Attrs: nounwind readnone
+define i32 @f0(i32 %a0, i32 %a1) #0 {
+b0:
+  %v0 = shl i32 1, %a1
+  %v1 = and i32 %v0, %a0
+  %v2 = icmp ne i32 %v1, 0
+  %v3 = zext i1 %v2 to i32
+  ret i32 %v3
+}
+
+attributes #0 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/twoaddressbug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/twoaddressbug.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/twoaddressbug.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/twoaddressbug.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,127 @@
+; RUN: llc -march=hexagon -O2 < %s
+; REQUIRES: asserts
+
+; This file used to fail with an "UNREACHABLE executed!" in the Post-RA pseudo
+; instruction expansion pass due to a bug in the TwoAddressInstructionPass:
+; we were not handling subregister indexes when rewriting tied operands.
+
+target triple = "hexagon"
+
+%0 = type { i8, i8, %1, i32, i32, %7, i8, i8, %8, i8, i32, i16, i16, [2500 x i8], i16, i16, i16, i8*, [1024 x i8], i32, i32, i32, i32, i32, i8 }
+%1 = type { i8, %2, i8, i8, i32 }
+%2 = type { %3 }
+%3 = type { i8, [256 x i8], %4, i8, i16, i32 }
+%4 = type { %5 }
+%5 = type { %6 }
+%6 = type { [2 x i64] }
+%7 = type { i32, i8 }
+%8 = type { %7, i32, i32, %1 }
+%9 = type { %10, i8* }
+%10 = type { i16, i16, i32 }
+%11 = type { i8, i32 }
+
+@g0 = external hidden global [2 x %0], align 8
+@g1 = external hidden constant %9, align 4
+@g2 = external hidden constant %9, align 4
+@g3 = external hidden constant %9, align 4
+@g4 = external hidden constant %9, align 4
+
+; Function Attrs: optsize
+declare void @f0(%9*, i32, i32, i32) #0
+
+; Function Attrs: nounwind optsize ssp
+define hidden fastcc void @f1(i64 %a0, i8 zeroext %a1, i8 zeroext %a2) #1 {
+b0:
+  %v0 = alloca %11, align 4
+  %v1 = icmp ne i8 %a1, 0
+  %v2 = trunc i64 %a0 to i32
+  br i1 %v1, label %b1, label %b4
+
+b1:                                               ; preds = %b0
+  call void @f0(%9* @g1, i32 %v2, i32 0, i32 0) #2
+  %v3 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 %v2, i32 7
+  store i8 1, i8* %v3, align 1
+  %v4 = icmp eq i8 %a2, 0
+  br i1 %v4, label %b4, label %b2
+
+b2:                                               ; preds = %b1
+  %v5 = getelementptr inbounds %11, %11* %v0, i32 0, i32 0
+  store i8 0, i8* %v5, align 4
+  %v6 = getelementptr inbounds %11, %11* %v0, i32 0, i32 1
+  store i32 0, i32* %v6, align 4
+  %v7 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 %v2, i32 3
+  %v8 = load i32, i32* %v7, align 8
+  %v9 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 %v2, i32 4
+  %v10 = load i32, i32* %v9, align 4
+  %v11 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 %v2, i32 19
+  %v12 = load i32, i32* %v11, align 4
+  %v13 = call zeroext i8 @f2(i64 %a0, i32 %v8, i32 %v10, i32 %v12, i8 zeroext 0, %11* %v0) #2
+  %v14 = icmp eq i8 %v13, 0
+  br i1 %v14, label %b4, label %b3
+
+b3:                                               ; preds = %b2
+  %v15 = zext i8 %v13 to i32
+  call void @f0(%9* @g2, i32 %v15, i32 %v2, i32 0) #2
+  br label %b4
+
+b4:                                               ; preds = %b3, %b2, %b1, %b0
+  %v16 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 %v2, i32 1
+  %v17 = load i8, i8* %v16, align 1
+  %v18 = zext i8 %v17 to i32
+  switch i32 %v18, label %b14 [
+    i32 2, label %b11
+    i32 1, label %b5
+    i32 4, label %b8
+    i32 3, label %b11
+  ]
+
+b5:                                               ; preds = %b4
+  call void @f0(%9* @g3, i32 %v2, i32 0, i32 0) #2
+  br i1 %v1, label %b7, label %b6
+
+b6:                                               ; preds = %b5
+  call fastcc void @f3(i64 %a0, i8 zeroext 0, i8 zeroext 1, i32 1) #0
+  br label %b14
+
+b7:                                               ; preds = %b5
+  call fastcc void @f3(i64 %a0, i8 zeroext 0, i8 zeroext 0, i32 1) #0
+  br label %b14
+
+b8:                                               ; preds = %b4
+  call void @f0(%9* @g4, i32 %v2, i32 0, i32 0) #2
+  %v19 = getelementptr inbounds [2 x %0], [2 x %0]* @g0, i32 0, i32 %v2, i32 6
+  store i8 1, i8* %v19, align 8
+  br i1 %v1, label %b10, label %b9
+
+b9:                                               ; preds = %b8
+  call fastcc void @f3(i64 %a0, i8 zeroext 0, i8 zeroext 1, i32 1) #0
+  br label %b14
+
+b10:                                              ; preds = %b8
+  call fastcc void @f3(i64 %a0, i8 zeroext 0, i8 zeroext 0, i32 1) #0
+  br label %b14
+
+b11:                                              ; preds = %b4, %b4
+  br i1 %v1, label %b13, label %b12
+
+b12:                                              ; preds = %b11
+  call fastcc void @f3(i64 %a0, i8 zeroext 0, i8 zeroext 1, i32 1) #0
+  br label %b14
+
+b13:                                              ; preds = %b11
+  call fastcc void @f3(i64 %a0, i8 zeroext 0, i8 zeroext 0, i32 1) #0
+  br label %b14
+
+b14:                                              ; preds = %b13, %b12, %b10, %b9, %b7, %b6, %b4
+  ret void
+}
+
+; Function Attrs: optsize
+declare zeroext i8 @f2(i64, i32, i32, i32, i8 zeroext, %11*) #0
+
+; Function Attrs: nounwind optsize ssp
+declare hidden fastcc void @f3(i64, i8 zeroext, i8 zeroext, i32) #1
+
+attributes #0 = { optsize }
+attributes #1 = { nounwind optsize ssp }
+attributes #2 = { nounwind optsize }

Added: llvm/trunk/test/CodeGen/Hexagon/undef-ret.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/undef-ret.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/undef-ret.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/undef-ret.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,19 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+; Check for direct use of r0 in addadd.
+; CHECK: = add(r0,add(r1,#2))
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0, i32 %a1, i32* nocapture %a2) #0 {
+b0:
+  %v0 = add nsw i32 %a0, 2
+  %v1 = add nsw i32 %v0, %a1
+  store i32 %v1, i32* %a2, align 4, !tbaa !0
+  ret i32 undef
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/unordered-fcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/unordered-fcmp.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/unordered-fcmp.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/unordered-fcmp.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,61 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Check that we generate the correct set of instructions for unordered
+; floating-point compares.
+
+; CHECK-LABEL: f0:
+; CHECK-DAG: [[PREG1:p[0-3]+]] = sfcmp.eq(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-DAG: [[PREG2:p[0-3]+]] = sfcmp.uo(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: p{{[0-3]+}} = or([[PREG2]],![[PREG1]])
+define float @f0(float %a0, float %a1, float %a2) #0 {
+b0:
+  %v0 = fcmp une float %a0, 0.000000e+00
+  %v1 = select i1 %v0, float %a2, float 0.000000e+00
+  ret float %v1
+}
+
+; CHECK-LABEL: f1:
+; CHECK-DAG: [[PREG1:p[0-3]+]] = sfcmp.ge(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-DAG: [[PREG2:p[0-3]+]] = sfcmp.uo(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: p{{[0-3]+}} = or([[PREG2]],[[PREG1]])
+define float @f1(float %a0, float %a1, float %a2) #0 {
+b0:
+  %v0 = fcmp uge float %a0, 0.000000e+00
+  %v1 = select i1 %v0, float %a2, float 0.000000e+00
+  ret float %v1
+}
+
+; CHECK-LABEL: f2:
+; CHECK-DAG: [[PREG1:p[0-3]+]] = sfcmp.gt(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-DAG: [[PREG2:p[0-3]+]] = sfcmp.uo(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: p{{[0-3]+}} = or([[PREG2]],[[PREG1]])
+define float @f2(float %a0, float %a1, float %a2) #0 {
+b0:
+  %v0 = fcmp ugt float %a0, 0.000000e+00
+  %v1 = select i1 %v0, float %a2, float 0.000000e+00
+  ret float %v1
+}
+
+; CHECK-LABEL: f3:
+; CHECK-DAG: [[PREG1:p[0-3]+]] = sfcmp.ge(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-DAG: [[PREG2:p[0-3]+]] = sfcmp.uo(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: p{{[0-3]+}} = or([[PREG2]],[[PREG1]])
+define float @f3(float %a0, float %a1, float %a2) #0 {
+b0:
+  %v0 = fcmp ule float %a0, 0.000000e+00
+  %v1 = select i1 %v0, float %a2, float 0.000000e+00
+  ret float %v1
+}
+
+; CHECK-LABEL: f4:
+; CHECK-DAG: [[PREG1:p[0-3]+]] = sfcmp.gt(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-DAG: [[PREG2:p[0-3]+]] = sfcmp.uo(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: p{{[0-3]+}} = or([[PREG2]],[[PREG1]])
+define float @f4(float %a0, float %a1, float %a2) #0 {
+b0:
+  %v0 = fcmp ult float %a0, 0.000000e+00
+  %v1 = select i1 %v0, float %a2, float 0.000000e+00
+  ret float %v1
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
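
The checks above follow from the definition of the unordered predicates: an unordered compare is true whenever either operand is a NaN, so it can be formed by OR-ing an "unordered" test with the corresponding ordered compare (negated in the 'une' case). A rough C equivalent, for illustration only:

#include <math.h>
#include <stdbool.h>

/* Illustrative decomposition of two of the predicates tested above;
   "unordered" means at least one operand is a NaN. */
bool fcmp_une(float a, float b) {  /* f0: or(uo, !eq) */
  return isnan(a) || isnan(b) || !(a == b);
}

bool fcmp_uge(float a, float b) {  /* f1: or(uo, ge) */
  return isnan(a) || isnan(b) || (a >= b);
}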

Added: llvm/trunk/test/CodeGen/Hexagon/upper-mpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/upper-mpy.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/upper-mpy.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/upper-mpy.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,87 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that we generate multiplies that use the upper half of the result.
+
+; CHECK: = mpy(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: = mpy(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: = mpy(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: = mpy(r{{[0-9]+}},r{{[0-9]+}})
+
+@g0 = external constant [1152 x i32], align 8
+@g1 = external constant [2 x i32], align 8
+
+; Function Attrs: nounwind
+define void @f0(i32* nocapture readonly %a0, i32* %a1, i32* nocapture %a2, i32 %a3, i32 %a4) #0 {
+b0:
+  %v0 = getelementptr inbounds i32, i32* %a0, i32 512
+  %v1 = getelementptr inbounds i32, i32* %a0, i32 511
+  %v2 = getelementptr inbounds i32, i32* %a2, i32 1023
+  %v3 = getelementptr inbounds i32, i32* %a1, i32 1023
+  br label %b1
+
+b1:                                               ; preds = %b0
+  %v4 = load i32, i32* getelementptr inbounds ([2 x i32], [2 x i32]* @g1, i32 0, i32 1), align 4
+  %v5 = getelementptr inbounds [1152 x i32], [1152 x i32]* @g0, i32 0, i32 %v4
+  br label %b2
+
+b2:                                               ; preds = %b1
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v6 = phi i32* [ %v30, %b3 ], [ %a2, %b2 ]
+  %v7 = phi i32* [ %v44, %b3 ], [ %a1, %b2 ]
+  %v8 = phi i32* [ %v17, %b3 ], [ %v0, %b2 ]
+  %v9 = phi i32* [ %v34, %b3 ], [ %v1, %b2 ]
+  %v10 = phi i32* [ %v40, %b3 ], [ %v3, %b2 ]
+  %v11 = phi i32* [ %v33, %b3 ], [ %v2, %b2 ]
+  %v12 = phi i32* [ %v15, %b3 ], [ %v5, %b2 ]
+  %v13 = getelementptr inbounds i32, i32* %v12, i32 1
+  %v14 = load i32, i32* %v12, align 4
+  %v15 = getelementptr inbounds i32, i32* %v12, i32 2
+  %v16 = load i32, i32* %v13, align 4
+  %v17 = getelementptr inbounds i32, i32* %v8, i32 1
+  %v18 = load i32, i32* %v8, align 4
+  %v19 = sext i32 %v14 to i64
+  %v20 = sext i32 %v18 to i64
+  %v21 = mul nsw i64 %v20, %v19
+  %v22 = lshr i64 %v21, 32
+  %v23 = trunc i64 %v22 to i32
+  %v24 = sext i32 %v16 to i64
+  %v25 = mul nsw i64 %v20, %v24
+  %v26 = lshr i64 %v25, 32
+  %v27 = trunc i64 %v26 to i32
+  %v28 = load i32, i32* %v7, align 4
+  %v29 = sub nsw i32 %v28, %v23
+  %v30 = getelementptr inbounds i32, i32* %v6, i32 1
+  store i32 %v29, i32* %v6, align 4
+  %v31 = load i32, i32* %v10, align 4
+  %v32 = add nsw i32 %v27, %v31
+  %v33 = getelementptr inbounds i32, i32* %v11, i32 -1
+  store i32 %v32, i32* %v11, align 4
+  %v34 = getelementptr inbounds i32, i32* %v9, i32 -1
+  %v35 = load i32, i32* %v9, align 4
+  %v36 = sext i32 %v35 to i64
+  %v37 = mul nsw i64 %v36, %v19
+  %v38 = lshr i64 %v37, 32
+  %v39 = trunc i64 %v38 to i32
+  %v40 = getelementptr inbounds i32, i32* %v10, i32 -1
+  store i32 %v39, i32* %v10, align 4
+  %v41 = mul nsw i64 %v36, %v24
+  %v42 = lshr i64 %v41, 32
+  %v43 = trunc i64 %v42 to i32
+  %v44 = getelementptr inbounds i32, i32* %v7, i32 1
+  store i32 %v43, i32* %v7, align 4
+  %v45 = icmp ult i32* %v44, %v40
+  br i1 %v45, label %b3, label %b4
+
+b4:                                               ; preds = %b3
+  br label %b6
+
+b5:                                               ; No predecessors!
+  br label %b6
+
+b6:                                               ; preds = %b5, %b4
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/v5_insns.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v5_insns.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v5_insns.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v5_insns.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,90 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+target triple = "hexagon"
+
+; CHECK-LABEL: f0:
+; CHECK: r{{[0-9]+}} = cmpyiwh(r{{[0-9]}}:{{[0-9]}},r{{[0-9]+}}*):<<1:rnd:sat
+define i32 @f0(double %a0) {
+b0:
+  %v0 = alloca i8, align 1
+  %v1 = fptosi double %a0 to i64
+  %v2 = tail call i32 @llvm.hexagon.M4.cmpyi.whc(i64 %v1, i32 512)
+  %v3 = trunc i32 %v2 to i8
+  store volatile i8 %v3, i8* %v0, align 1
+  %v4 = load volatile i8, i8* %v0, align 1
+  %v5 = zext i8 %v4 to i32
+  ret i32 %v5
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M4.cmpyi.whc(i64, i32) #0
+
+; CHECK-LABEL: f1:
+; CHECK: r{{[0-9]+}} = cmpyrwh(r{{[0-9]}}:{{[0-9]}},r{{[0-9]+}}*):<<1:rnd:sat
+define i32 @f1(double %a0) {
+b0:
+  %v0 = alloca i8, align 1
+  %v1 = fptosi double %a0 to i64
+  %v2 = tail call i32 @llvm.hexagon.M4.cmpyr.whc(i64 %v1, i32 512)
+  %v3 = trunc i32 %v2 to i8
+  store volatile i8 %v3, i8* %v0, align 1
+  %v4 = load volatile i8, i8* %v0, align 1
+  %v5 = zext i8 %v4 to i32
+  ret i32 %v5
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M4.cmpyr.whc(i64, i32) #0
+
+; CHECK-LABEL: f2:
+; CHECK: r{{[0-9]+}} = popcount(r{{[0-9]}}:{{[0-9]}})
+define i32 @f2(double %a0) {
+b0:
+  %v0 = alloca i8, align 1
+  %v1 = fptosi double %a0 to i64
+  %v2 = tail call i32 @llvm.hexagon.S5.popcountp(i64 %v1)
+  %v3 = trunc i32 %v2 to i8
+  store volatile i8 %v3, i8* %v0, align 1
+  %v4 = load volatile i8, i8* %v0, align 1
+  %v5 = zext i8 %v4 to i32
+  ret i32 %v5
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S5.popcountp(i64) #0
+
+; CHECK-LABEL: f3:
+; CHECK: p{{[0-3]+}} = sfclass(r{{[0-9]}},#3)
+define i32 @f3(float %a0) {
+b0:
+  %v0 = alloca i8, align 1
+  %v1 = tail call i32 @llvm.hexagon.F2.sfclass(float %a0, i32 3)
+  %v2 = trunc i32 %v1 to i8
+  store volatile i8 %v2, i8* %v0, align 1
+  %v3 = load volatile i8, i8* %v0, align 1
+  %v4 = zext i8 %v3 to i32
+  ret i32 %v4
+}
+
+; Function Attrs: readnone
+declare i32 @llvm.hexagon.F2.sfclass(float, i32) #1
+
+; CHECK-LABEL: f4:
+; CHECK: r{{[0-9]+}} = vasrhub(r{{[0-9]}}:{{[0-9]}},#3):sat
+define i32 @f4(float %a0) {
+b0:
+  %v0 = alloca i8, align 1
+  %v1 = fptosi float %a0 to i64
+  %v2 = tail call i32 @llvm.hexagon.S5.asrhub.sat(i64 %v1, i32 3)
+  %v3 = trunc i32 %v2 to i8
+  store volatile i8 %v3, i8* %v0, align 1
+  %v4 = load volatile i8, i8* %v0, align 1
+  %v5 = zext i8 %v4 to i32
+  ret i32 %v5
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S5.asrhub.sat(i64, i32) #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/v6-inlasm1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6-inlasm1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6-inlasm1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6-inlasm1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,114 @@
+; RUN: llc -march=hexagon -O2 -disable-hexagon-shuffle=1 < %s | FileCheck %s
+; CHECK: vmemu(r{{[0-9]+}}) = v{{[0-9]*}};
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i8* %a0, i32 %a1, i8* %a2, i32 %a3, i8* %a4) #0 {
+b0:
+  %v0 = alloca i8*, align 4
+  %v1 = alloca i32, align 4
+  %v2 = alloca i8*, align 4
+  %v3 = alloca i32, align 4
+  %v4 = alloca i8*, align 4
+  %v5 = alloca i32, align 4
+  %v6 = alloca i32, align 4
+  %v7 = alloca i32, align 4
+  %v8 = alloca i32, align 4
+  %v9 = alloca i32, align 4
+  %v10 = alloca <16 x i32>, align 64
+  %v11 = alloca <16 x i32>, align 64
+  %v12 = alloca <16 x i32>, align 64
+  %v13 = alloca <16 x i32>, align 64
+  %v14 = alloca <16 x i32>, align 64
+  %v15 = alloca <16 x i32>, align 64
+  %v16 = alloca <16 x i32>, align 64
+  %v17 = alloca <16 x i32>, align 64
+  %v18 = alloca <16 x i32>, align 64
+  %v19 = alloca <16 x i32>, align 64
+  %v20 = alloca <16 x i32>, align 64
+  store i8* %a0, i8** %v0, align 4
+  store i32 %a1, i32* %v1, align 4
+  store i8* %a2, i8** %v2, align 4
+  store i32 %a3, i32* %v3, align 4
+  store i8* %a4, i8** %v4, align 4
+  %v21 = load i32, i32* %v1, align 4
+  %v22 = ashr i32 %v21, 16
+  %v23 = and i32 65535, %v22
+  store i32 %v23, i32* %v8, align 4
+  %v24 = load i32, i32* %v1, align 4
+  %v25 = and i32 65535, %v24
+  store i32 %v25, i32* %v5, align 4
+  %v26 = load i32, i32* %v3, align 4
+  %v27 = and i32 65535, %v26
+  store i32 %v27, i32* %v6, align 4
+  %v28 = load i32, i32* %v3, align 4
+  %v29 = ashr i32 %v28, 16
+  %v30 = and i32 65535, %v29
+  store i32 %v30, i32* %v9, align 4
+  %v31 = load i8*, i8** %v4, align 4
+  %v32 = bitcast i8* %v31 to <16 x i32>*
+  %v33 = load <16 x i32>, <16 x i32>* %v32, align 64
+  store <16 x i32> %v33, <16 x i32>* %v10, align 64
+  %v34 = load i8*, i8** %v4, align 4
+  %v35 = getelementptr inbounds i8, i8* %v34, i32 64
+  %v36 = bitcast i8* %v35 to <16 x i32>*
+  %v37 = load <16 x i32>, <16 x i32>* %v36, align 64
+  store <16 x i32> %v37, <16 x i32>* %v12, align 64
+  %v38 = load i32, i32* %v9, align 4
+  store i32 %v38, i32* %v7, align 4
+  br label %b1
+
+b1:                                               ; preds = %b3, %b0
+  %v39 = load i32, i32* %v7, align 4
+  %v40 = icmp sge i32 %v39, 0
+  br i1 %v40, label %b2, label %b4
+
+b2:                                               ; preds = %b1
+  %v41 = load i8*, i8** %v0, align 4
+  %v42 = bitcast i8* %v41 to <16 x i32>*
+  %v43 = load <16 x i32>, <16 x i32>* %v42, align 4
+  store <16 x i32> %v43, <16 x i32>* %v14, align 64
+  %v44 = load i32, i32* %v5, align 4
+  %v45 = load i8*, i8** %v0, align 4
+  %v46 = getelementptr inbounds i8, i8* %v45, i32 %v44
+  store i8* %v46, i8** %v0, align 4
+  %v47 = load <16 x i32>, <16 x i32>* %v14, align 64
+  %v48 = load <16 x i32>, <16 x i32>* %v10, align 64
+  %v49 = call <16 x i32> @llvm.hexagon.V6.vrdelta(<16 x i32> %v47, <16 x i32> %v48)
+  store <16 x i32> %v49, <16 x i32>* %v15, align 64
+  %v50 = load <16 x i32>, <16 x i32>* %v14, align 64
+  %v51 = load <16 x i32>, <16 x i32>* %v12, align 64
+  %v52 = call <16 x i32> @llvm.hexagon.V6.vrdelta(<16 x i32> %v50, <16 x i32> %v51)
+  store <16 x i32> %v52, <16 x i32>* %v17, align 64
+  %v53 = load <16 x i32>, <16 x i32>* %v15, align 64
+  %v54 = load <16 x i32>, <16 x i32>* %v17, align 64
+  %v55 = call <16 x i32> @llvm.hexagon.V6.vavgub(<16 x i32> %v53, <16 x i32> %v54)
+  store <16 x i32> %v55, <16 x i32>* %v19, align 64
+  %v56 = load i8*, i8** %v2, align 4
+  %v57 = load <16 x i32>, <16 x i32>* %v19, align 64
+  call void asm sideeffect "  vmemu($0) = $1;\0A", "r,v,~{memory}"(i8* %v56, <16 x i32> %v57) #2, !srcloc !0
+  br label %b3
+
+b3:                                               ; preds = %b2
+  %v58 = load i32, i32* %v6, align 4
+  %v59 = load i32, i32* %v7, align 4
+  %v60 = sub nsw i32 %v59, %v58
+  store i32 %v60, i32* %v7, align 4
+  br label %b1
+
+b4:                                               ; preds = %b1
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vrdelta(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vavgub(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
+
+!0 = !{i32 1708}

Added: llvm/trunk/test/CodeGen/Hexagon/v6-inlasm2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6-inlasm2.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6-inlasm2.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6-inlasm2.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,33 @@
+; RUN: llc -march=hexagon -O2 -disable-hexagon-shuffle=1 < %s | FileCheck %s
+; CHECK: vmemu(r{{[0-9]+}}) = v{{[0-9]*}};
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i8* %a0, i8* %a1) #0 {
+b0:
+  %v0 = alloca i8*, align 4
+  %v1 = alloca i8*, align 4
+  %v2 = alloca <16 x i32>, align 64
+  store i8* %a0, i8** %v0, align 4
+  store i8* %a1, i8** %v1, align 4
+  %v3 = load i8*, i8** %v0, align 4
+  %v4 = load <16 x i32>, <16 x i32>* %v2, align 64
+  call void asm sideeffect "  $1 = vmemu($0);\0A", "r,v"(i8* %v3, <16 x i32> %v4) #1, !srcloc !0
+  %v5 = load i8*, i8** %v1, align 4
+  %v6 = load <16 x i32>, <16 x i32>* %v2, align 64
+  call void asm sideeffect "  vmemu($0) = $1;\0A", "r,v,~{memory}"(i8* %v5, <16 x i32> %v6) #1, !srcloc !1
+  ret void
+}
+
+; Function Attrs: nounwind
+define i32 @f1() #0 {
+b0:
+  ret i32 0
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind }
+
+!0 = !{i32 233}
+!1 = !{i32 307}

Added: llvm/trunk/test/CodeGen/Hexagon/v6-inlasm3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6-inlasm3.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6-inlasm3.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6-inlasm3.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,44 @@
+; RUN: llc -march=hexagon -O2 -disable-hexagon-shuffle=1 < %s | FileCheck %s
+; CHECK: vmemu(r{{[0-9]+}}) = v{{[0-9]*}}
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i8* %a0, i8* %a1) #0 {
+b0:
+  %v0 = alloca i8*, align 4
+  %v1 = alloca i8*, align 4
+  %v2 = alloca <16 x i32>, align 64
+  %v3 = alloca <16 x i32>, align 64
+  %v4 = alloca <32 x i32>, align 128
+  store i8* %a0, i8** %v0, align 4
+  store i8* %a1, i8** %v1, align 4
+  %v5 = load i8*, i8** %v0, align 4
+  %v6 = load <16 x i32>, <16 x i32>* %v2, align 64
+  call void asm sideeffect "  $1 = vmemu($0);\0A", "r,v"(i8* %v5, <16 x i32> %v6) #1, !srcloc !0
+  %v7 = load i8*, i8** %v0, align 4
+  %v8 = load <16 x i32>, <16 x i32>* %v3, align 64
+  call void asm sideeffect "  $1 = vmemu($0);\0A", "r,v"(i8* %v7, <16 x i32> %v8) #1, !srcloc !1
+  %v9 = load <32 x i32>, <32 x i32>* %v4, align 128
+  %v10 = load <16 x i32>, <16 x i32>* %v2, align 64
+  %v11 = load <16 x i32>, <16 x i32>* %v3, align 64
+  call void asm sideeffect "  $0 = vcombine($1,$2);\0A", "v,v,v"(<32 x i32> %v9, <16 x i32> %v10, <16 x i32> %v11) #1, !srcloc !2
+  %v12 = load i8*, i8** %v1, align 4
+  %v13 = load <16 x i32>, <16 x i32>* %v2, align 64
+  call void asm sideeffect "  vmemu($0) = $1;\0A", "r,v,~{memory}"(i8* %v12, <16 x i32> %v13) #1, !srcloc !3
+  ret void
+}
+
+; Function Attrs: nounwind
+define i32 @f1() #0 {
+b0:
+  ret i32 0
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind }
+
+!0 = !{i32 272}
+!1 = !{i32 348}
+!2 = !{i32 424}
+!3 = !{i32 519}

Added: llvm/trunk/test/CodeGen/Hexagon/v6-inlasm4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6-inlasm4.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6-inlasm4.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6-inlasm4.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,29 @@
+; RUN: llc -march=hexagon -O2 -disable-hexagon-shuffle=1 < %s | FileCheck %s
+; CHECK: q{{[0-3]}} = vsetq(r{{[0-9]+}})
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0, <16 x i32> %a1) #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca <16 x i32>, align 64
+  %v2 = alloca <16 x i32>, align 64
+  store i32 %a0, i32* %v0, align 4
+  store <16 x i32> %a1, <16 x i32>* %v1, align 64
+  %v3 = load i32, i32* %v0, align 4
+  %v4 = load <16 x i32>, <16 x i32>* %v2, align 64
+  call void asm sideeffect "  $1 = vsetq($0);\0A", "r,q"(i32 %v3, <16 x i32> %v4) #1, !srcloc !0
+  ret void
+}
+
+; Function Attrs: nounwind
+define i32 @f1() #0 {
+b0:
+  ret i32 0
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind }
+
+!0 = !{i32 222}

Added: llvm/trunk/test/CodeGen/Hexagon/v6-shuffl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6-shuffl.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6-shuffl.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6-shuffl.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,54 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+; CHECK: vsplat
+
+target triple = "hexagon"
+
+@g0 = common global [15 x <16 x i32>] zeroinitializer, align 64
+@g1 = common global [15 x <32 x i32>] zeroinitializer, align 128
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  %v0 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
+  store <16 x i32> %v0, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 0), align 64, !tbaa !0
+  %v1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 2)
+  store <16 x i32> %v1, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 1), align 64, !tbaa !0
+  %v2 = tail call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %v0, <16 x i32> %v1)
+  store <32 x i32> %v2, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g1, i32 0, i32 0), align 128, !tbaa !0
+  store <32 x i32> %v2, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g1, i32 0, i32 1), align 128, !tbaa !0
+  %v3 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 3)
+  store <16 x i32> %v3, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 2), align 64, !tbaa !0
+  %v4 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 4)
+  store <16 x i32> %v4, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 3), align 64, !tbaa !0
+  %v5 = tail call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %v3, <16 x i32> %v4)
+  store <32 x i32> %v5, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g1, i32 0, i32 2), align 128, !tbaa !0
+  store <32 x i32> %v5, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g1, i32 0, i32 3), align 128, !tbaa !0
+  %v6 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 5)
+  store <16 x i32> %v6, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 4), align 64, !tbaa !0
+  %v7 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 6)
+  store <16 x i32> %v7, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 5), align 64, !tbaa !0
+  %v8 = tail call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %v6, <16 x i32> %v7)
+  store <32 x i32> %v8, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g1, i32 0, i32 4), align 128, !tbaa !0
+  store <32 x i32> %v8, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g1, i32 0, i32 5), align 128, !tbaa !0
+  %v9 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 7)
+  store <16 x i32> %v9, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 6), align 64, !tbaa !0
+  %v10 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 8)
+  store <16 x i32> %v10, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 7), align 64, !tbaa !0
+  %v11 = tail call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %v9, <16 x i32> %v10)
+  store <32 x i32> %v11, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g1, i32 0, i32 6), align 128, !tbaa !0
+  store <32 x i32> %v11, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g1, i32 0, i32 7), align 128, !tbaa !0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/v6-spill1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6-spill1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6-spill1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6-spill1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,267 @@
+; RUN: llc -march=hexagon -O2 -pipeliner-max-mii=10 < %s | FileCheck %s
+; CHECK-NOT: vmemu
+
+; Function Attrs: nounwind
+define void @f0(i8* nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i16* nocapture %a4, i16* nocapture %a5) #0 {
+b0:
+  %v0 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %a3)
+  %v1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v0)
+  %v2 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 16843009)
+  %v3 = tail call <16 x i32> @llvm.hexagon.V6.vd0()
+  %v4 = sdiv i32 %a2, 64
+  %v5 = icmp sgt i32 %a2, 63
+  br i1 %v5, label %b1, label %b6
+
+b1:                                               ; preds = %b0
+  %v6 = bitcast i16* %a5 to <16 x i32>*
+  %v7 = bitcast i16* %a4 to <16 x i32>*
+  %v8 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v3, <16 x i32> %v3)
+  br label %b2
+
+b2:                                               ; preds = %b4, %b1
+  %v9 = phi i32 [ 0, %b1 ], [ %v100, %b4 ]
+  %v10 = phi i8* [ %a0, %b1 ], [ %v87, %b4 ]
+  %v11 = phi <16 x i32>* [ %v6, %b1 ], [ %v99, %b4 ]
+  %v12 = phi <16 x i32>* [ %v7, %b1 ], [ %v95, %b4 ]
+  %v13 = bitcast i8* %v10 to <16 x i32>*
+  %v14 = load <16 x i32>, <16 x i32>* %v13, align 64, !tbaa !0
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v15 = phi i32 [ -4, %b2 ], [ %v83, %b3 ]
+  %v16 = phi <32 x i32> [ %v8, %b2 ], [ %v78, %b3 ]
+  %v17 = phi <16 x i32> [ %v3, %b2 ], [ %v82, %b3 ]
+  %v18 = mul nsw i32 %v15, %a1
+  %v19 = getelementptr inbounds i8, i8* %v10, i32 %v18
+  %v20 = bitcast i8* %v19 to <16 x i32>*
+  %v21 = add i32 %v18, -64
+  %v22 = getelementptr inbounds i8, i8* %v10, i32 %v21
+  %v23 = bitcast i8* %v22 to <16 x i32>*
+  %v24 = load <16 x i32>, <16 x i32>* %v23, align 64, !tbaa !0
+  %v25 = load <16 x i32>, <16 x i32>* %v20, align 64, !tbaa !0
+  %v26 = add i32 %v18, 64
+  %v27 = getelementptr inbounds i8, i8* %v10, i32 %v26
+  %v28 = bitcast i8* %v27 to <16 x i32>*
+  %v29 = load <16 x i32>, <16 x i32>* %v28, align 64, !tbaa !0
+  %v30 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v25, <16 x i32> %v14)
+  %v31 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v30, <16 x i32> %v1)
+  %v32 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v31, <16 x i32> %v3, <16 x i32> %v25)
+  %v33 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v16, <16 x i32> %v32, i32 16843009)
+  %v34 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v31, <16 x i32> %v17, <16 x i32> %v2)
+  %v35 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v25, <16 x i32> %v24, i32 1)
+  %v36 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v29, <16 x i32> %v25, i32 1)
+  %v37 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v25, <16 x i32> %v24, i32 2)
+  %v38 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v29, <16 x i32> %v25, i32 2)
+  %v39 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v35, <16 x i32> %v14)
+  %v40 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v36, <16 x i32> %v14)
+  %v41 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v37, <16 x i32> %v14)
+  %v42 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v38, <16 x i32> %v14)
+  %v43 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v39, <16 x i32> %v1)
+  %v44 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v40, <16 x i32> %v1)
+  %v45 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v41, <16 x i32> %v1)
+  %v46 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v42, <16 x i32> %v1)
+  %v47 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v43, <16 x i32> %v3, <16 x i32> %v35)
+  %v48 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v44, <16 x i32> %v3, <16 x i32> %v36)
+  %v49 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v45, <16 x i32> %v3, <16 x i32> %v37)
+  %v50 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v46, <16 x i32> %v3, <16 x i32> %v38)
+  %v51 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v48, <16 x i32> %v47)
+  %v52 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v33, <32 x i32> %v51, i32 16843009)
+  %v53 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v50, <16 x i32> %v49)
+  %v54 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v52, <32 x i32> %v53, i32 16843009)
+  %v55 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v43, <16 x i32> %v34, <16 x i32> %v2)
+  %v56 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v44, <16 x i32> %v55, <16 x i32> %v2)
+  %v57 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v45, <16 x i32> %v56, <16 x i32> %v2)
+  %v58 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v46, <16 x i32> %v57, <16 x i32> %v2)
+  %v59 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v25, <16 x i32> %v24, i32 3)
+  %v60 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v29, <16 x i32> %v25, i32 3)
+  %v61 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v25, <16 x i32> %v24, i32 4)
+  %v62 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v29, <16 x i32> %v25, i32 4)
+  %v63 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v59, <16 x i32> %v14)
+  %v64 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v60, <16 x i32> %v14)
+  %v65 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v61, <16 x i32> %v14)
+  %v66 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v62, <16 x i32> %v14)
+  %v67 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v63, <16 x i32> %v1)
+  %v68 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v64, <16 x i32> %v1)
+  %v69 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v65, <16 x i32> %v1)
+  %v70 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v66, <16 x i32> %v1)
+  %v71 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v67, <16 x i32> %v3, <16 x i32> %v59)
+  %v72 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v68, <16 x i32> %v3, <16 x i32> %v60)
+  %v73 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v69, <16 x i32> %v3, <16 x i32> %v61)
+  %v74 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v70, <16 x i32> %v3, <16 x i32> %v62)
+  %v75 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v72, <16 x i32> %v71)
+  %v76 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v54, <32 x i32> %v75, i32 16843009)
+  %v77 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v74, <16 x i32> %v73)
+  %v78 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v76, <32 x i32> %v77, i32 16843009)
+  %v79 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v67, <16 x i32> %v58, <16 x i32> %v2)
+  %v80 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v68, <16 x i32> %v79, <16 x i32> %v2)
+  %v81 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v69, <16 x i32> %v80, <16 x i32> %v2)
+  %v82 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v70, <16 x i32> %v81, <16 x i32> %v2)
+  %v83 = add nsw i32 %v15, 1
+  %v84 = icmp eq i32 %v83, 5
+  br i1 %v84, label %b4, label %b3
+
+b4:                                               ; preds = %b3
+  %v85 = phi <16 x i32> [ %v82, %b3 ]
+  %v86 = phi <32 x i32> [ %v78, %b3 ]
+  %v87 = getelementptr inbounds i8, i8* %v10, i32 64
+  %v88 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v86)
+  %v89 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v86)
+  %v90 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %v88, <16 x i32> %v89, i32 -2)
+  %v91 = tail call <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32> %v85)
+  %v92 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v90)
+  %v93 = getelementptr inbounds <16 x i32>, <16 x i32>* %v12, i32 1
+  store <16 x i32> %v92, <16 x i32>* %v12, align 64, !tbaa !0
+  %v94 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v90)
+  %v95 = getelementptr inbounds <16 x i32>, <16 x i32>* %v12, i32 2
+  store <16 x i32> %v94, <16 x i32>* %v93, align 64, !tbaa !0
+  %v96 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v91)
+  %v97 = getelementptr inbounds <16 x i32>, <16 x i32>* %v11, i32 1
+  store <16 x i32> %v96, <16 x i32>* %v11, align 64, !tbaa !0
+  %v98 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v91)
+  %v99 = getelementptr inbounds <16 x i32>, <16 x i32>* %v11, i32 2
+  store <16 x i32> %v98, <16 x i32>* %v97, align 64, !tbaa !0
+  %v100 = add nsw i32 %v9, 1
+  %v101 = icmp slt i32 %v100, %v4
+  br i1 %v101, label %b2, label %b5
+
+b5:                                               ; preds = %b4
+  br label %b6
+
+b6:                                               ; preds = %b5, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.vsplatrb(i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vd0() #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32>) #1
+
+; Function Attrs: nounwind
+define void @f1(i16* nocapture readonly %a0, i16* nocapture readonly %a1, i16* nocapture readonly %a2, i32 %a3, i8* nocapture %a4) #0 {
+b0:
+  %v0 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 983055)
+  %v1 = sdiv i32 %a3, 64
+  %v2 = icmp sgt i32 %a3, 63
+  br i1 %v2, label %b1, label %b4
+
+b1:                                               ; preds = %b0
+  %v3 = bitcast i8* %a4 to <16 x i32>*
+  %v4 = bitcast i16* %a1 to <16 x i32>*
+  %v5 = bitcast i16* %a2 to <16 x i32>*
+  %v6 = bitcast i16* %a0 to <16 x i32>*
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v7 = phi i32 [ 0, %b1 ], [ %v44, %b2 ]
+  %v8 = phi <16 x i32>* [ %v3, %b1 ], [ %v43, %b2 ]
+  %v9 = phi <16 x i32>* [ %v4, %b1 ], [ %v29, %b2 ]
+  %v10 = phi <16 x i32>* [ %v5, %b1 ], [ %v32, %b2 ]
+  %v11 = phi <16 x i32>* [ %v6, %b1 ], [ %v27, %b2 ]
+  %v12 = getelementptr inbounds <16 x i32>, <16 x i32>* %v11, i32 1
+  %v13 = load <16 x i32>, <16 x i32>* %v11, align 64, !tbaa !0
+  %v14 = getelementptr inbounds <16 x i32>, <16 x i32>* %v9, i32 1
+  %v15 = load <16 x i32>, <16 x i32>* %v9, align 64, !tbaa !0
+  %v16 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v13, <16 x i32> %v15)
+  %v17 = getelementptr inbounds <16 x i32>, <16 x i32>* %v10, i32 1
+  %v18 = load <16 x i32>, <16 x i32>* %v10, align 64, !tbaa !0
+  %v19 = tail call <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32> %v18, <16 x i32> %v0)
+  %v20 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v16)
+  %v21 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v19)
+  %v22 = tail call <16 x i32> @llvm.hexagon.V6.vlsrwv(<16 x i32> %v20, <16 x i32> %v21)
+  %v23 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v16)
+  %v24 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v19)
+  %v25 = tail call <16 x i32> @llvm.hexagon.V6.vlsrwv(<16 x i32> %v23, <16 x i32> %v24)
+  %v26 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %v25, <16 x i32> %v22)
+  %v27 = getelementptr inbounds <16 x i32>, <16 x i32>* %v11, i32 2
+  %v28 = load <16 x i32>, <16 x i32>* %v12, align 64, !tbaa !0
+  %v29 = getelementptr inbounds <16 x i32>, <16 x i32>* %v9, i32 2
+  %v30 = load <16 x i32>, <16 x i32>* %v14, align 64, !tbaa !0
+  %v31 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v28, <16 x i32> %v30)
+  %v32 = getelementptr inbounds <16 x i32>, <16 x i32>* %v10, i32 2
+  %v33 = load <16 x i32>, <16 x i32>* %v17, align 64, !tbaa !0
+  %v34 = tail call <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32> %v33, <16 x i32> %v0)
+  %v35 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v31)
+  %v36 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v34)
+  %v37 = tail call <16 x i32> @llvm.hexagon.V6.vlsrwv(<16 x i32> %v35, <16 x i32> %v36)
+  %v38 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v31)
+  %v39 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v34)
+  %v40 = tail call <16 x i32> @llvm.hexagon.V6.vlsrwv(<16 x i32> %v38, <16 x i32> %v39)
+  %v41 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %v40, <16 x i32> %v37)
+  %v42 = tail call <16 x i32> @llvm.hexagon.V6.vpackhub.sat(<16 x i32> %v41, <16 x i32> %v26)
+  %v43 = getelementptr inbounds <16 x i32>, <16 x i32>* %v8, i32 1
+  store <16 x i32> %v42, <16 x i32>* %v8, align 64, !tbaa !0
+  %v44 = add nsw i32 %v7, 1
+  %v45 = icmp slt i32 %v44, %v1
+  br i1 %v45, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  br label %b4
+
+b4:                                               ; preds = %b3, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vlsrwv(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vpackhub.sat(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/v6-unaligned-spill.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6-unaligned-spill.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6-unaligned-spill.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6-unaligned-spill.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,53 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that we generate an unaligned vector store for a spill when a function
+; has an alloca. Also, make sure the addressing mode for the unaligned store
+; is not base+offset with a non-zero offset that is not a multiple of 128.
+
+; CHECK: vmemu(r{{[0-9]+}}+#0)
+
+%s.0 = type { [5 x [4 x i8]], i32, i32, i32, i32 }
+
+; Function Attrs: nounwind
+define i32 @f0(i8* nocapture readonly %a0, i8* nocapture %a1, i8* nocapture readonly %a2, i8* nocapture readonly %a3, i32 %a4, i32 %a5, i32 %a6, %s.0* nocapture readonly %a7) #0 {
+b0:
+  %v0 = alloca i8, i32 %a4, align 128
+  br i1 undef, label %b1, label %b5
+
+b1:                                               ; preds = %b0
+  %v1 = icmp sgt i32 %a5, 2
+  br label %b2
+
+b2:                                               ; preds = %b3, %b2, %b1
+  br i1 undef, label %b3, label %b2
+
+b3:                                               ; preds = %b2
+  call void @f1(i8* undef, i8* undef, i8* nonnull %v0, i32 %a4, i32 %a5, %s.0* %a7)
+  %v2 = tail call <32 x i32> @llvm.hexagon.V6.vd0.128B() #2
+  br i1 %v1, label %b4, label %b2
+
+b4:                                               ; preds = %b4, %b3
+  %v3 = phi <32 x i32> [ %v5, %b4 ], [ undef, %b3 ]
+  %v4 = tail call <32 x i32> @llvm.hexagon.V6.vsubhnq.128B(<1024 x i1> undef, <32 x i32> undef, <32 x i32> %v3) #2
+  %v5 = tail call <32 x i32> @llvm.hexagon.V6.vavguh.128B(<32 x i32> %v3, <32 x i32> %v2) #2
+  br label %b4
+
+b5:                                               ; preds = %b0
+  ret i32 0
+}
+
+; Function Attrs: nounwind
+declare void @f1(i8* nocapture readonly, i8* nocapture readonly, i8* nocapture, i32, i32, %s.0* nocapture readonly) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vd0.128B() #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vsubhnq.128B(<1024 x i1>, <32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vavguh.128B(<32 x i32>, <32 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/v6-vecpred-copy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6-vecpred-copy.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6-vecpred-copy.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6-vecpred-copy.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,149 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+
+; CHECK: v{{[0-9]*}} = vxor(v{{[0-9]*}},v{{[0-9]*}})
+; CHECK: if (q{{[-0-3]}}) v{{[0-9]*}}.b += v{{[0-9]*}}.b
+; CHECK: if (q{{[-0-3]}}) v{{[0-9]*}}.b -= v{{[0-9]*}}.b
+; CHECK: if (q{{[-0-3]}}) v{{[0-9]*}}.h += v{{[0-9]*}}.h
+; CHECK: if (q{{[-0-3]}}) v{{[0-9]*}}.h -= v{{[0-9]*}}.h
+; CHECK: if (q{{[-0-3]}}) v{{[0-9]*}}.w += v{{[0-9]*}}.w
+; CHECK: if (q{{[-0-3]}}) v{{[0-9]*}}.w -= v{{[0-9]*}}.w
+; CHECK: if (!q{{[-0-3]}}) v{{[0-9]*}}.b += v{{[0-9]*}}.b
+; CHECK: if (!q{{[-0-3]}}) v{{[0-9]*}}.b -= v{{[0-9]*}}.b
+; CHECK: if (!q{{[-0-3]}}) v{{[0-9]*}}.h += v{{[0-9]*}}.h
+; CHECK: if (!q{{[-0-3]}}) v{{[0-9]*}}.h -= v{{[0-9]*}}.h
+; CHECK: if (!q{{[-0-3]}}) v{{[0-9]*}}.w += v{{[0-9]*}}.w
+; CHECK: if (!q{{[-0-3]}}) v{{[0-9]*}}.w -= v{{[0-9]*}}.w
+
+target triple = "hexagon"
+
+ at g0 = common global <16 x i32> zeroinitializer, align 64
+ at g1 = common global <16 x i32> zeroinitializer, align 64
+ at g2 = common global <16 x i32> zeroinitializer, align 64
+ at g3 = common global <16 x i32> zeroinitializer, align 64
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = call <16 x i32> @llvm.hexagon.V6.vd0()
+  store <16 x i32> %v0, <16 x i32>* @g0, align 64
+  %v1 = call <16 x i32> @llvm.hexagon.V6.vd0()
+  store <16 x i32> %v1, <16 x i32>* @g1, align 64
+  %v2 = call <16 x i32> @llvm.hexagon.V6.vd0()
+  store <16 x i32> %v2, <16 x i32>* @g2, align 64
+  %v3 = load <16 x i32>, <16 x i32>* @g3, align 64
+  %v4 = bitcast <16 x i32> %v3 to <512 x i1>
+  %v5 = load <16 x i32>, <16 x i32>* @g2, align 64
+  %v6 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v7 = call <16 x i32> @llvm.hexagon.V6.vaddbq(<512 x i1> %v4, <16 x i32> %v5, <16 x i32> %v6)
+  store <16 x i32> %v7, <16 x i32>* @g2, align 64
+  %v8 = load <16 x i32>, <16 x i32>* @g3, align 64
+  %v9 = bitcast <16 x i32> %v8 to <512 x i1>
+  %v10 = load <16 x i32>, <16 x i32>* @g2, align 64
+  %v11 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v12 = call <16 x i32> @llvm.hexagon.V6.vsubbq(<512 x i1> %v9, <16 x i32> %v10, <16 x i32> %v11)
+  store <16 x i32> %v12, <16 x i32>* @g2, align 64
+  %v13 = load <16 x i32>, <16 x i32>* @g3, align 64
+  %v14 = bitcast <16 x i32> %v13 to <512 x i1>
+  %v15 = load <16 x i32>, <16 x i32>* @g2, align 64
+  %v16 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v17 = call <16 x i32> @llvm.hexagon.V6.vaddhq(<512 x i1> %v14, <16 x i32> %v15, <16 x i32> %v16)
+  store <16 x i32> %v17, <16 x i32>* @g2, align 64
+  %v18 = load <16 x i32>, <16 x i32>* @g3, align 64
+  %v19 = bitcast <16 x i32> %v18 to <512 x i1>
+  %v20 = load <16 x i32>, <16 x i32>* @g2, align 64
+  %v21 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v22 = call <16 x i32> @llvm.hexagon.V6.vsubhq(<512 x i1> %v19, <16 x i32> %v20, <16 x i32> %v21)
+  store <16 x i32> %v22, <16 x i32>* @g2, align 64
+  %v23 = load <16 x i32>, <16 x i32>* @g3, align 64
+  %v24 = bitcast <16 x i32> %v23 to <512 x i1>
+  %v25 = load <16 x i32>, <16 x i32>* @g2, align 64
+  %v26 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v27 = call <16 x i32> @llvm.hexagon.V6.vaddwq(<512 x i1> %v24, <16 x i32> %v25, <16 x i32> %v26)
+  store <16 x i32> %v27, <16 x i32>* @g2, align 64
+  %v28 = load <16 x i32>, <16 x i32>* @g3, align 64
+  %v29 = bitcast <16 x i32> %v28 to <512 x i1>
+  %v30 = load <16 x i32>, <16 x i32>* @g2, align 64
+  %v31 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v32 = call <16 x i32> @llvm.hexagon.V6.vsubwq(<512 x i1> %v29, <16 x i32> %v30, <16 x i32> %v31)
+  store <16 x i32> %v32, <16 x i32>* @g2, align 64
+  %v33 = load <16 x i32>, <16 x i32>* @g3, align 64
+  %v34 = bitcast <16 x i32> %v33 to <512 x i1>
+  %v35 = load <16 x i32>, <16 x i32>* @g2, align 64
+  %v36 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v37 = call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v34, <16 x i32> %v35, <16 x i32> %v36)
+  store <16 x i32> %v37, <16 x i32>* @g2, align 64
+  %v38 = load <16 x i32>, <16 x i32>* @g3, align 64
+  %v39 = bitcast <16 x i32> %v38 to <512 x i1>
+  %v40 = load <16 x i32>, <16 x i32>* @g2, align 64
+  %v41 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v42 = call <16 x i32> @llvm.hexagon.V6.vsubbnq(<512 x i1> %v39, <16 x i32> %v40, <16 x i32> %v41)
+  store <16 x i32> %v42, <16 x i32>* @g2, align 64
+  %v43 = load <16 x i32>, <16 x i32>* @g3, align 64
+  %v44 = bitcast <16 x i32> %v43 to <512 x i1>
+  %v45 = load <16 x i32>, <16 x i32>* @g2, align 64
+  %v46 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v47 = call <16 x i32> @llvm.hexagon.V6.vaddhnq(<512 x i1> %v44, <16 x i32> %v45, <16 x i32> %v46)
+  store <16 x i32> %v47, <16 x i32>* @g2, align 64
+  %v48 = load <16 x i32>, <16 x i32>* @g3, align 64
+  %v49 = bitcast <16 x i32> %v48 to <512 x i1>
+  %v50 = load <16 x i32>, <16 x i32>* @g2, align 64
+  %v51 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v52 = call <16 x i32> @llvm.hexagon.V6.vsubhnq(<512 x i1> %v49, <16 x i32> %v50, <16 x i32> %v51)
+  store <16 x i32> %v52, <16 x i32>* @g2, align 64
+  %v53 = load <16 x i32>, <16 x i32>* @g3, align 64
+  %v54 = bitcast <16 x i32> %v53 to <512 x i1>
+  %v55 = load <16 x i32>, <16 x i32>* @g2, align 64
+  %v56 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v57 = call <16 x i32> @llvm.hexagon.V6.vaddwnq(<512 x i1> %v54, <16 x i32> %v55, <16 x i32> %v56)
+  store <16 x i32> %v57, <16 x i32>* @g2, align 64
+  %v58 = load <16 x i32>, <16 x i32>* @g3, align 64
+  %v59 = bitcast <16 x i32> %v58 to <512 x i1>
+  %v60 = load <16 x i32>, <16 x i32>* @g2, align 64
+  %v61 = load <16 x i32>, <16 x i32>* @g1, align 64
+  %v62 = call <16 x i32> @llvm.hexagon.V6.vsubwnq(<512 x i1> %v59, <16 x i32> %v60, <16 x i32> %v61)
+  store <16 x i32> %v62, <16 x i32>* @g2, align 64
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vd0() #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddbq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsubbq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddhq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsubhq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddwq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsubwq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsubbnq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddhnq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsubhnq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddwnq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsubwnq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
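
A note on the <512 x i1> values above: this test obtains its vector predicates by
bitcasting a <16 x i32>, but predicates are more commonly produced directly by a
compare intrinsic. A minimal sketch under that assumption (the function name
@example is illustrative only; the intrinsic signatures mirror declarations that
appear elsewhere in this patch, and the same v60/64-byte HVX attributes apply):

  declare <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>, <16 x i32>)
  declare <16 x i32> @llvm.hexagon.V6.vaddbq(<512 x i1>, <16 x i32>, <16 x i32>)

  ; Function Attrs: nounwind
  define <16 x i32> @example(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c) #0 {
  b0:
    ; q = (a.ub > b.ub)
    %q = call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %a, <16 x i32> %b)
    ; predicated byte add, cf. the "if (q) v.b += v.b" pattern checked above
    %r = call <16 x i32> @llvm.hexagon.V6.vaddbq(<512 x i1> %q, <16 x i32> %c, <16 x i32> %a)
    ret <16 x i32> %r
  }

  attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }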

Added: llvm/trunk/test/CodeGen/Hexagon/v60-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v60-align.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v60-align.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v60-align.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,42 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; CHECK: allocframe(r29,#{{[1-9][0-9]*}}):raw
+; CHECK: r29 = and(r29,#-64)
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = alloca <16 x i32>, align 64
+  %v1 = bitcast <16 x i32>* %v0 to i8*
+  call void @llvm.lifetime.start.p0i8(i64 64, i8* %v1) #3
+  %v2 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
+  %v3 = tail call <16 x i32> @llvm.hexagon.V6.vsubh.rt(<16 x i32> %v2, i32 -1)
+  store <16 x i32> %v3, <16 x i32>* %v0, align 64, !tbaa !0
+  call void @f1(i32 64, i8* %v1) #3
+  call void @llvm.lifetime.end.p0i8(i64 64, i8* %v1) #3
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsubh.rt(<16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #1
+
+declare void @f1(i32, i8*) #0
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #2
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #2
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { argmemonly nounwind }
+attributes #3 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/v60-haar-postinc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v60-haar-postinc.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v60-haar-postinc.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v60-haar-postinc.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,120 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+
+; CHECK: vmem(r{{[0-9]+}}++#1)
+; CHECK: vmem(r{{[0-9]+}}++#1)
+; CHECK: vmem(r{{[0-9]+}}++#1)
+; CHECK: vmem(r{{[0-9]+}}++#1)
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i16* nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i8* nocapture %a4, i32 %a5) #0 {
+b0:
+  %v0 = ashr i32 %a3, 2
+  %v1 = ashr i32 %a3, 1
+  %v2 = add i32 %v1, %v0
+  %v3 = icmp sgt i32 %a2, 0
+  br i1 %v3, label %b1, label %b8
+
+b1:                                               ; preds = %b0
+  %v4 = sdiv i32 %a1, 64
+  %v5 = icmp sgt i32 %a1, 63
+  br label %b2
+
+b2:                                               ; preds = %b6, %b1
+  %v6 = phi i32 [ 0, %b1 ], [ %v56, %b6 ]
+  %v7 = ashr exact i32 %v6, 1
+  %v8 = mul nsw i32 %v7, %a3
+  br i1 %v5, label %b3, label %b6
+
+b3:                                               ; preds = %b2
+  %v9 = add nsw i32 %v6, 1
+  %v10 = mul nsw i32 %v9, %a5
+  %v11 = mul nsw i32 %v6, %a5
+  %v12 = add i32 %v2, %v8
+  %v13 = add i32 %v8, %v0
+  %v14 = add i32 %v8, %v1
+  %v15 = getelementptr inbounds i8, i8* %a4, i32 %v10
+  %v16 = getelementptr inbounds i8, i8* %a4, i32 %v11
+  %v17 = getelementptr inbounds i16, i16* %a0, i32 %v12
+  %v18 = getelementptr inbounds i16, i16* %a0, i32 %v13
+  %v19 = getelementptr inbounds i16, i16* %a0, i32 %v14
+  %v20 = getelementptr inbounds i16, i16* %a0, i32 %v8
+  %v21 = bitcast i8* %v15 to <16 x i32>*
+  %v22 = bitcast i8* %v16 to <16 x i32>*
+  %v23 = bitcast i16* %v17 to <16 x i32>*
+  %v24 = bitcast i16* %v18 to <16 x i32>*
+  %v25 = bitcast i16* %v19 to <16 x i32>*
+  %v26 = bitcast i16* %v20 to <16 x i32>*
+  br label %b4
+
+b4:                                               ; preds = %b4, %b3
+  %v27 = phi i32 [ 0, %b3 ], [ %v54, %b4 ]
+  %v28 = phi <16 x i32>* [ %v26, %b3 ], [ %v34, %b4 ]
+  %v29 = phi <16 x i32>* [ %v25, %b3 ], [ %v36, %b4 ]
+  %v30 = phi <16 x i32>* [ %v24, %b3 ], [ %v38, %b4 ]
+  %v31 = phi <16 x i32>* [ %v23, %b3 ], [ %v40, %b4 ]
+  %v32 = phi <16 x i32>* [ %v21, %b3 ], [ %v53, %b4 ]
+  %v33 = phi <16 x i32>* [ %v22, %b3 ], [ %v52, %b4 ]
+  %v34 = getelementptr inbounds <16 x i32>, <16 x i32>* %v28, i32 1
+  %v35 = load <16 x i32>, <16 x i32>* %v28, align 64, !tbaa !0
+  %v36 = getelementptr inbounds <16 x i32>, <16 x i32>* %v29, i32 1
+  %v37 = load <16 x i32>, <16 x i32>* %v29, align 64, !tbaa !0
+  %v38 = getelementptr inbounds <16 x i32>, <16 x i32>* %v30, i32 1
+  %v39 = load <16 x i32>, <16 x i32>* %v30, align 64, !tbaa !0
+  %v40 = getelementptr inbounds <16 x i32>, <16 x i32>* %v31, i32 1
+  %v41 = load <16 x i32>, <16 x i32>* %v31, align 64, !tbaa !0
+  %v42 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v35, <16 x i32> %v37)
+  %v43 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v35, <16 x i32> %v37)
+  %v44 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v39, <16 x i32> %v41)
+  %v45 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v39, <16 x i32> %v41)
+  %v46 = tail call <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32> %v42, <16 x i32> %v44)
+  %v47 = tail call <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32> %v42, <16 x i32> %v44)
+  %v48 = tail call <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32> %v43, <16 x i32> %v45)
+  %v49 = tail call <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32> %v43, <16 x i32> %v45)
+  %v50 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v47, <16 x i32> %v46)
+  %v51 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v49, <16 x i32> %v48)
+  %v52 = getelementptr inbounds <16 x i32>, <16 x i32>* %v33, i32 1
+  store <16 x i32> %v50, <16 x i32>* %v33, align 64, !tbaa !0
+  %v53 = getelementptr inbounds <16 x i32>, <16 x i32>* %v32, i32 1
+  store <16 x i32> %v51, <16 x i32>* %v32, align 64, !tbaa !0
+  %v54 = add nsw i32 %v27, 1
+  %v55 = icmp slt i32 %v54, %v4
+  br i1 %v55, label %b4, label %b5
+
+b5:                                               ; preds = %b4
+  br label %b6
+
+b6:                                               ; preds = %b5, %b2
+  %v56 = add nsw i32 %v6, 2
+  %v57 = icmp slt i32 %v56, %a2
+  br i1 %v57, label %b2, label %b7
+
+b7:                                               ; preds = %b6
+  br label %b8
+
+b8:                                               ; preds = %b7, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/v60-halide-vcombinei8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v60-halide-vcombinei8.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v60-halide-vcombinei8.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v60-halide-vcombinei8.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,51 @@
+; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
+
+; Check that we do not crash with "Cannot select" on a vcombine of v128i8.
+; CHECK: vadd
+
+target triple = "hexagon-unknown--elf"
+
+; Function Attrs: norecurse nounwind
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b6, label %b1
+
+b1:                                               ; preds = %b0
+  br i1 undef, label %b2, label %b6, !prof !1
+
+b2:                                               ; preds = %b1
+  br label %b3
+
+b3:                                               ; preds = %b5, %b2
+  br i1 undef, label %b4, label %b5, !prof !1
+
+b4:                                               ; preds = %b3
+  %v0 = load <64 x i8>, <64 x i8>* undef, align 1
+  %v1 = shufflevector <64 x i8> %v0, <64 x i8> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
+  %v2 = bitcast <128 x i8> %v1 to <32 x i32>
+  %v3 = tail call <32 x i32> @llvm.hexagon.V6.vaddb.dv(<32 x i32> undef, <32 x i32> %v2)
+  %v4 = bitcast <32 x i32> %v3 to <128 x i8>
+  %v5 = shufflevector <128 x i8> %v4, <128 x i8> undef, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+  store <64 x i8> %v5, <64 x i8>* undef, align 1, !tbaa !2
+  br label %b5
+
+b5:                                               ; preds = %b4, %b3
+  br i1 undef, label %b6, label %b3
+
+b6:                                               ; preds = %b5, %b1, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vaddb.dv(<32 x i32>, <32 x i32>) #1
+
+attributes #0 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+
+!llvm.module.flags = !{!0}
+
+!0 = !{i32 2, !"halide_mattrs", !"+hvx"}
+!1 = !{!"branch_weights", i32 1073741824, i32 0}
+!2 = !{!3, !3, i64 0}
+!3 = !{!"Addb$6", !4}
+!4 = !{!"Halide buffer"}

Added: llvm/trunk/test/CodeGen/Hexagon/v60-vec-128b-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v60-vec-128b-1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v60-vec-128b-1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v60-vec-128b-1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,43 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; CHECK: v{{[0-9]+}} = vsplat(r{{[0-9]+}})
+; CHECK: .comm g0,256,256
+; CHECK: .comm g1,128,128
+
+target triple = "hexagon"
+
+ at g0 = common global <64 x i32> zeroinitializer, align 256
+ at g1 = common global <32 x i32> zeroinitializer, align 128
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = alloca i32, align 4
+  store i32 0, i32* %v0
+  %v1 = call i32 @f1(i8 zeroext 0)
+  call void bitcast (void (...)* @f2 to void ()*)()
+  %v2 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 1)
+  %v3 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 2)
+  %v4 = call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> %v2, <32 x i32> %v3)
+  %v5 = call <64 x i32> @llvm.hexagon.V6.vtmpyhb.128B(<64 x i32> %v4, i32 12)
+  store <64 x i32> %v5, <64 x i32>* @g0, align 256
+  call void @f3(i32 2048, i8* bitcast (<64 x i32>* @g0 to i8*))
+  ret i32 0
+}
+
+declare i32 @f1(i8 zeroext) #0
+
+declare void @f2(...) #0
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vtmpyhb.128B(<64 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32) #1
+
+declare void @f3(i32, i8*) #0
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/v60-vecpred-spill.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v60-vecpred-spill.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v60-vecpred-spill.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v60-vecpred-spill.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,191 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; CHECK-NOT: vmem(r30+#-1){{ *}} = v{{[0-9]+}}
+; CHECK-NOT: v{{[0-9]+}} = vmem(r30+#-1)
+; CHECK: v{{[0-9]+}} = vmux
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i8* nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i16* nocapture %a4, i16* nocapture %a5) #0 {
+b0:
+  %v0 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %a3)
+  %v1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v0)
+  %v2 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 16843009)
+  %v3 = tail call <16 x i32> @llvm.hexagon.V6.vsubw(<16 x i32> undef, <16 x i32> undef)
+  %v4 = sdiv i32 %a2, 64
+  %v5 = icmp sgt i32 %a2, 63
+  br i1 %v5, label %b1, label %b6
+
+b1:                                               ; preds = %b0
+  %v6 = bitcast i16* %a5 to <16 x i32>*
+  %v7 = bitcast i16* %a4 to <16 x i32>*
+  %v8 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v3, <16 x i32> %v3)
+  br label %b2
+
+b2:                                               ; preds = %b4, %b1
+  %v9 = phi i32 [ 0, %b1 ], [ %v100, %b4 ]
+  %v10 = phi i8* [ %a0, %b1 ], [ %v87, %b4 ]
+  %v11 = phi <16 x i32>* [ %v6, %b1 ], [ %v99, %b4 ]
+  %v12 = phi <16 x i32>* [ %v7, %b1 ], [ %v95, %b4 ]
+  %v13 = bitcast i8* %v10 to <16 x i32>*
+  %v14 = load <16 x i32>, <16 x i32>* %v13, align 64, !tbaa !0
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v15 = phi i32 [ -4, %b2 ], [ %v83, %b3 ]
+  %v16 = phi <32 x i32> [ %v8, %b2 ], [ %v78, %b3 ]
+  %v17 = phi <16 x i32> [ %v3, %b2 ], [ %v82, %b3 ]
+  %v18 = mul nsw i32 %v15, %a1
+  %v19 = getelementptr inbounds i8, i8* %v10, i32 %v18
+  %v20 = bitcast i8* %v19 to <16 x i32>*
+  %v21 = add i32 %v18, -64
+  %v22 = getelementptr inbounds i8, i8* %v10, i32 %v21
+  %v23 = bitcast i8* %v22 to <16 x i32>*
+  %v24 = load <16 x i32>, <16 x i32>* %v23, align 64, !tbaa !0
+  %v25 = load <16 x i32>, <16 x i32>* %v20, align 64, !tbaa !0
+  %v26 = add i32 %v18, 64
+  %v27 = getelementptr inbounds i8, i8* %v10, i32 %v26
+  %v28 = bitcast i8* %v27 to <16 x i32>*
+  %v29 = load <16 x i32>, <16 x i32>* %v28, align 64, !tbaa !0
+  %v30 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v25, <16 x i32> %v14)
+  %v31 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v30, <16 x i32> %v1)
+  %v32 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v31, <16 x i32> %v3, <16 x i32> %v25)
+  %v33 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v16, <16 x i32> %v32, i32 16843009)
+  %v34 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v31, <16 x i32> %v17, <16 x i32> %v2)
+  %v35 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v25, <16 x i32> %v24, i32 1)
+  %v36 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v29, <16 x i32> %v25, i32 1)
+  %v37 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v25, <16 x i32> %v24, i32 2)
+  %v38 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v29, <16 x i32> %v25, i32 2)
+  %v39 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v35, <16 x i32> %v14)
+  %v40 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v36, <16 x i32> %v14)
+  %v41 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v37, <16 x i32> %v14)
+  %v42 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v38, <16 x i32> %v14)
+  %v43 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v39, <16 x i32> %v1)
+  %v44 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v40, <16 x i32> %v1)
+  %v45 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v41, <16 x i32> %v1)
+  %v46 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v42, <16 x i32> %v1)
+  %v47 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v43, <16 x i32> %v3, <16 x i32> %v35)
+  %v48 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v44, <16 x i32> %v3, <16 x i32> %v36)
+  %v49 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v45, <16 x i32> %v3, <16 x i32> %v37)
+  %v50 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v46, <16 x i32> %v3, <16 x i32> %v38)
+  %v51 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v48, <16 x i32> %v47)
+  %v52 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v33, <32 x i32> %v51, i32 16843009)
+  %v53 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v50, <16 x i32> %v49)
+  %v54 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v52, <32 x i32> %v53, i32 16843009)
+  %v55 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v43, <16 x i32> %v34, <16 x i32> %v2)
+  %v56 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v44, <16 x i32> %v55, <16 x i32> %v2)
+  %v57 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v45, <16 x i32> %v56, <16 x i32> %v2)
+  %v58 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v46, <16 x i32> %v57, <16 x i32> %v2)
+  %v59 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v25, <16 x i32> %v24, i32 3)
+  %v60 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v29, <16 x i32> %v25, i32 3)
+  %v61 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v25, <16 x i32> %v24, i32 4)
+  %v62 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v29, <16 x i32> %v25, i32 4)
+  %v63 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v59, <16 x i32> %v14)
+  %v64 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v60, <16 x i32> %v14)
+  %v65 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v61, <16 x i32> %v14)
+  %v66 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v62, <16 x i32> %v14)
+  %v67 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v63, <16 x i32> %v1)
+  %v68 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v64, <16 x i32> %v1)
+  %v69 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v65, <16 x i32> %v1)
+  %v70 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v66, <16 x i32> %v1)
+  %v71 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v67, <16 x i32> %v3, <16 x i32> %v59)
+  %v72 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v68, <16 x i32> %v3, <16 x i32> %v60)
+  %v73 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v69, <16 x i32> %v3, <16 x i32> %v61)
+  %v74 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v70, <16 x i32> %v3, <16 x i32> %v62)
+  %v75 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v72, <16 x i32> %v71)
+  %v76 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v54, <32 x i32> %v75, i32 16843009)
+  %v77 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v74, <16 x i32> %v73)
+  %v78 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v76, <32 x i32> %v77, i32 16843009)
+  %v79 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v67, <16 x i32> %v58, <16 x i32> %v2)
+  %v80 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v68, <16 x i32> %v79, <16 x i32> %v2)
+  %v81 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v69, <16 x i32> %v80, <16 x i32> %v2)
+  %v82 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v70, <16 x i32> %v81, <16 x i32> %v2)
+  %v83 = add nsw i32 %v15, 1
+  %v84 = icmp eq i32 %v83, 5
+  br i1 %v84, label %b4, label %b3
+
+b4:                                               ; preds = %b3
+  %v85 = phi <16 x i32> [ %v82, %b3 ]
+  %v86 = phi <32 x i32> [ %v78, %b3 ]
+  %v87 = getelementptr inbounds i8, i8* %v10, i32 64
+  %v88 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v86)
+  %v89 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v86)
+  %v90 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %v88, <16 x i32> %v89, i32 -2)
+  %v91 = tail call <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32> %v85)
+  %v92 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v90)
+  %v93 = getelementptr inbounds <16 x i32>, <16 x i32>* %v12, i32 1
+  store <16 x i32> %v92, <16 x i32>* %v12, align 64, !tbaa !0
+  %v94 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v90)
+  %v95 = getelementptr inbounds <16 x i32>, <16 x i32>* %v12, i32 2
+  store <16 x i32> %v94, <16 x i32>* %v93, align 64, !tbaa !0
+  %v96 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v91)
+  %v97 = getelementptr inbounds <16 x i32>, <16 x i32>* %v11, i32 1
+  store <16 x i32> %v96, <16 x i32>* %v11, align 64, !tbaa !0
+  %v98 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v91)
+  %v99 = getelementptr inbounds <16 x i32>, <16 x i32>* %v11, i32 2
+  store <16 x i32> %v98, <16 x i32>* %v97, align 64, !tbaa !0
+  %v100 = add nsw i32 %v9, 1
+  %v101 = icmp slt i32 %v100, %v4
+  br i1 %v101, label %b2, label %b5
+
+b5:                                               ; preds = %b4
+  br label %b6
+
+b6:                                               ; preds = %b5, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.vsplatrb(i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsubw(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/v60-vsel2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v60-vsel2.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v60-vsel2.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v60-vsel2.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,99 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+
+; CHECK: v{{[0-9]+}}:{{[0-9]+}} = vcombine(v{{[0-9]+}},v{{[0-9]+}})
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i8* nocapture readnone %a0, i32 %a1, i32 %a2, i32 %a3, i32* nocapture %a4, i32 %a5) #0 {
+b0:
+  %v0 = bitcast i32* %a4 to <16 x i32>*
+  %v1 = mul i32 %a5, -2
+  %v2 = add i32 %v1, %a1
+  %v3 = and i32 %a5, 63
+  %v4 = add i32 %v2, %v3
+  %v5 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 -1)
+  %v6 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
+  %v7 = tail call <512 x i1> @llvm.hexagon.V6.pred.scalar2(i32 %v4)
+  %v8 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt.acc(<16 x i32> %v6, <512 x i1> %v7, i32 12)
+  %v9 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v8, <16 x i32> %v8)
+  %v10 = and i32 %v4, 511
+  %v11 = icmp eq i32 %v10, 0
+  br i1 %v11, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  %v12 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v5, <16 x i32> %v8)
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v13 = phi <32 x i32> [ %v12, %b1 ], [ %v9, %b0 ]
+  %v14 = icmp sgt i32 %v4, 0
+  br i1 %v14, label %b3, label %b6
+
+b3:                                               ; preds = %b2
+  %v15 = tail call <512 x i1> @llvm.hexagon.V6.pred.scalar2(i32 %a5)
+  %v16 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<512 x i1> %v15, i32 16843009)
+  %v17 = tail call <16 x i32> @llvm.hexagon.V6.vnot(<16 x i32> %v16)
+  %v18 = add i32 %v3, %a1
+  %v19 = add i32 %v18, -1
+  %v20 = add i32 %v19, %v1
+  %v21 = lshr i32 %v20, 9
+  %v22 = mul i32 %v21, 16
+  %v23 = add nuw nsw i32 %v22, 16
+  %v24 = getelementptr i32, i32* %a4, i32 %v23
+  br label %b4
+
+b4:                                               ; preds = %b4, %b3
+  %v25 = phi i32 [ %v4, %b3 ], [ %v30, %b4 ]
+  %v26 = phi <16 x i32> [ %v17, %b3 ], [ %v5, %b4 ]
+  %v27 = phi <16 x i32>* [ %v0, %b3 ], [ %v29, %b4 ]
+  %v28 = tail call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> undef, <16 x i32> %v26)
+  %v29 = getelementptr inbounds <16 x i32>, <16 x i32>* %v27, i32 1
+  store <16 x i32> %v28, <16 x i32>* %v27, align 64, !tbaa !0
+  %v30 = add nsw i32 %v25, -512
+  %v31 = icmp sgt i32 %v30, 0
+  br i1 %v31, label %b4, label %b5
+
+b5:                                               ; preds = %b4
+  %v32 = bitcast i32* %v24 to <16 x i32>*
+  br label %b6
+
+b6:                                               ; preds = %b5, %b2
+  %v33 = phi <16 x i32>* [ %v32, %b5 ], [ %v0, %b2 ]
+  %v34 = load <16 x i32>, <16 x i32>* %v33, align 64, !tbaa !0
+  %v35 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v13)
+  %v36 = tail call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> %v34, <16 x i32> %v35)
+  store <16 x i32> %v36, <16 x i32>* %v33, align 64, !tbaa !0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #1
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.hexagon.V6.pred.scalar2(i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vandqrt.acc(<16 x i32>, <512 x i1>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vandqrt(<512 x i1>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vnot(<16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vand(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/v60_Q6_P_rol_PI.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v60_Q6_P_rol_PI.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v60_Q6_P_rol_PI.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v60_Q6_P_rol_PI.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,28 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; CHECK: r{{[0-9]*}}:{{[0-9]*}} = rol(r{{[0-9]*}}:{{[0-9]*}},#4)
+
+target triple = "hexagon"
+
+ at g0 = private unnamed_addr constant [33 x i8] c"%llx :  Q6_P_rol_PI(LONG_MIN,0)\0A\00", align 1
+
+; Function Attrs: nounwind
+declare i32 @f0(i8*, ...) #0
+
+; Function Attrs: nounwind
+define i32 @f1() #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca i32, align 4
+  store i32 0, i32* %v0
+  store i32 0, i32* %v1, align 4
+  %v2 = call i64 @llvm.hexagon.S6.rol.i.p(i64 483648, i32 4)
+  %v3 = call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([33 x i8], [33 x i8]* @g0, i32 0, i32 0), i64 %v2) #2
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S6.rol.i.p(i64, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/v60_sort16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v60_sort16.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v60_sort16.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v60_sort16.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,104 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; Check that the 3rd register field is restricted to r0-r7, e.g.:
+; v3:2=vdeal(v3,v2,r1)
+; CHECK: v{{[0-9]+}}:{{[0-9]+}} = vdeal(v{{[0-9]+}},v{{[0-9]+}},r{{[0-7]+}})
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i16* %a0, i32 %a1, i8* %a2, i16* %a3) #0 {
+b0:
+  %v0 = alloca i16*, align 4
+  %v1 = alloca i32, align 4
+  %v2 = alloca i8*, align 4
+  %v3 = alloca i16*, align 4
+  %v4 = alloca i32, align 4
+  %v5 = alloca i32, align 4
+  %v6 = alloca i32, align 4
+  %v7 = alloca i32, align 4
+  %v8 = alloca i32, align 4
+  %v9 = alloca i16*, align 4
+  %v10 = alloca i16*, align 4
+  %v11 = alloca <16 x i32>, align 64
+  %v12 = alloca <16 x i32>, align 64
+  %v13 = alloca <32 x i32>, align 128
+  %v14 = alloca <16 x i32>, align 64
+  %v15 = alloca <16 x i32>, align 64
+  %v16 = alloca <32 x i32>, align 128
+  %v17 = alloca <16 x i32>, align 64
+  %v18 = alloca <16 x i32>, align 64
+  store i16* %a0, i16** %v0, align 4
+  store i32 %a1, i32* %v1, align 4
+  store i8* %a2, i8** %v2, align 4
+  store i16* %a3, i16** %v3, align 4
+  %v19 = load i8*, i8** %v2, align 4
+  %v20 = getelementptr inbounds i8, i8* %v19, i32 192
+  %v21 = bitcast i8* %v20 to <16 x i32>*
+  %v22 = load <16 x i32>, <16 x i32>* %v21, align 64
+  store <16 x i32> %v22, <16 x i32>* %v12, align 64
+  store i32 16843009, i32* %v4, align 4
+  %v23 = load i32, i32* %v4, align 4
+  %v24 = load i32, i32* %v4, align 4
+  %v25 = add nsw i32 %v23, %v24
+  store i32 %v25, i32* %v5, align 4
+  %v26 = load i32, i32* %v5, align 4
+  %v27 = load i32, i32* %v5, align 4
+  %v28 = add nsw i32 %v26, %v27
+  store i32 %v28, i32* %v6, align 4
+  %v29 = load i16*, i16** %v0, align 4
+  store i16* %v29, i16** %v9, align 4
+  %v30 = load i16*, i16** %v3, align 4
+  store i16* %v30, i16** %v10, align 4
+  store i32 0, i32* %v8, align 4
+  br label %b1
+
+b1:                                               ; preds = %b3, %b0
+  %v31 = load i32, i32* %v8, align 4
+  %v32 = load i32, i32* %v1, align 4
+  %v33 = icmp slt i32 %v31, %v32
+  br i1 %v33, label %b2, label %b4
+
+b2:                                               ; preds = %b1
+  %v34 = load <16 x i32>, <16 x i32>* %v11, align 64
+  %v35 = bitcast <16 x i32> %v34 to <512 x i1>
+  %v36 = load <16 x i32>, <16 x i32>* %v14, align 64
+  %v37 = load <16 x i32>, <16 x i32>* %v15, align 64
+  %v38 = call <32 x i32> @llvm.hexagon.V6.vswap(<512 x i1> %v35, <16 x i32> %v36, <16 x i32> %v37)
+  store <32 x i32> %v38, <32 x i32>* %v13, align 128
+  %v39 = load <32 x i32>, <32 x i32>* %v13, align 128
+  %v40 = call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v39)
+  store <16 x i32> %v40, <16 x i32>* %v14, align 64
+  %v41 = load <32 x i32>, <32 x i32>* %v13, align 128
+  %v42 = call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v41)
+  store <16 x i32> %v42, <16 x i32>* %v15, align 64
+  %v43 = load <16 x i32>, <16 x i32>* %v17, align 64
+  %v44 = load <16 x i32>, <16 x i32>* %v18, align 64
+  %v45 = load i32, i32* %v7, align 4
+  %v46 = call <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32> %v43, <16 x i32> %v44, i32 %v45)
+  store <32 x i32> %v46, <32 x i32>* %v16, align 128
+  br label %b3
+
+b3:                                               ; preds = %b2
+  %v47 = load i32, i32* %v8, align 4
+  %v48 = add nsw i32 %v47, 1
+  store i32 %v48, i32* %v8, align 4
+  br label %b1
+
+b4:                                               ; preds = %b1
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vswap(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32>, <16 x i32>, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/v60rol-instrs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v60rol-instrs.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v60rol-instrs.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v60rol-instrs.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,53 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+
+; CHECK: r{{[0-9]*}} += rol(r{{[0-9]*}},#31)
+; CHECK: r{{[0-9]*}} &= rol(r{{[0-9]*}},#31)
+; CHECK: r{{[0-9]*}} -= rol(r{{[0-9]*}},#31)
+; CHECK: r{{[0-9]*}} |= rol(r{{[0-9]*}},#31)
+; CHECK: r{{[0-9]*}} ^= rol(r{{[0-9]*}},#31)
+
+target triple = "hexagon"
+
+ at g0 = common global i32 0, align 4
+ at g1 = common global i32 0, align 4
+ at g2 = common global i32 0, align 4
+ at g3 = common global i32 0, align 4
+ at g4 = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca i32, align 4
+  store i32 0, i32* %v0
+  store i32 0, i32* %v1, align 4
+  %v2 = call i32 @llvm.hexagon.S6.rol.i.r.acc(i32 0, i32 1, i32 31)
+  store i32 %v2, i32* @g0, align 4
+  %v3 = call i32 @llvm.hexagon.S6.rol.i.r.and(i32 0, i32 1, i32 31)
+  store i32 %v3, i32* @g1, align 4
+  %v4 = call i32 @llvm.hexagon.S6.rol.i.r.nac(i32 0, i32 1, i32 31)
+  store i32 %v4, i32* @g2, align 4
+  %v5 = call i32 @llvm.hexagon.S6.rol.i.r.or(i32 0, i32 1, i32 31)
+  store i32 %v5, i32* @g3, align 4
+  %v6 = call i32 @llvm.hexagon.S6.rol.i.r.xacc(i32 0, i32 1, i32 31)
+  store i32 %v6, i32* @g4, align 4
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S6.rol.i.r.acc(i32, i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S6.rol.i.r.and(i32, i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S6.rol.i.r.nac(i32, i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S6.rol.i.r.or(i32, i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S6.rol.i.r.xacc(i32, i32, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/v62-CJAllSlots.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v62-CJAllSlots.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v62-CJAllSlots.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v62-CJAllSlots.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,63 @@
+; RUN: llc -march=hexagon -O2 -disable-block-placement < %s | FileCheck %s
+; Disable block placement because it interferes with the generated code.
+
+; CHECK:      if (p{{[0-9]*}}) jump:nt .LBB0_2
+; CHECK-NEXT: v{{[0-9]*}} = vmem(r{{[0-9]*}}+#0)
+; CHECK-NEXT: }
+
+target triple = "hexagon"
+
+ at g0 = global <16 x i32> zeroinitializer, align 64
+
+; Function Attrs: nounwind
+define void @f0(i16* nocapture readonly %a0, i32 %a1, i32 %a2, i16* nocapture %a3) #0 {
+b0:
+  %v0 = mul i32 %a2, -2
+  %v1 = add i32 %v0, 64
+  %v2 = bitcast i16* %a3 to <16 x i32>*
+  %v3 = load <16 x i32>, <16 x i32>* @g0, align 64
+  %v4 = sdiv i32 %a1, 32
+  %v5 = icmp sgt i32 %a1, 31
+  br i1 %v5, label %b1, label %b4
+
+b1:                                               ; preds = %b0
+  %v6 = bitcast i16* %a0 to <16 x i32>*
+  %v7 = icmp sgt i32 %a1, 63
+  %v8 = mul i32 %v4, 32
+  %v9 = select i1 %v7, i32 %v8, i32 32
+  %v10 = getelementptr i16, i16* %a3, i32 %v9
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v11 = phi i32 [ 0, %b1 ], [ %v19, %b2 ]
+  %v12 = phi <16 x i32> [ %v3, %b1 ], [ %v16, %b2 ]
+  %v13 = phi <16 x i32>* [ %v2, %b1 ], [ %v18, %b2 ]
+  %v14 = phi <16 x i32>* [ %v6, %b1 ], [ %v15, %b2 ]
+  %v15 = getelementptr inbounds <16 x i32>, <16 x i32>* %v14, i32 1
+  %v16 = load <16 x i32>, <16 x i32>* %v14, align 64
+  %v17 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v16, <16 x i32> %v12, i32 %v1)
+  %v18 = getelementptr inbounds <16 x i32>, <16 x i32>* %v13, i32 1
+  store <16 x i32> %v17, <16 x i32>* %v13, align 64
+  %v19 = add nsw i32 %v11, 1
+  %v20 = icmp slt i32 %v19, %v4
+  br i1 %v20, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  %v21 = bitcast i16* %v10 to <16 x i32>*
+  %v22 = load <16 x i32>, <16 x i32>* @g0, align 64
+  br label %b4
+
+b4:                                               ; preds = %b3, %b0
+  %v23 = phi <16 x i32> [ %v22, %b3 ], [ %v3, %b0 ]
+  %v24 = phi <16 x i32> [ %v16, %b3 ], [ %v3, %b0 ]
+  %v25 = phi <16 x i32>* [ %v21, %b3 ], [ %v2, %b0 ]
+  %v26 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v23, <16 x i32> %v24, i32 %v1)
+  store <16 x i32> %v26, <16 x i32>* %v25, align 64
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv62" "target-features"="+hvxv62,+hvx-length64b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/v62-inlasm4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v62-inlasm4.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v62-inlasm4.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v62-inlasm4.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,27 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; CHECK: q{{[0-3]}} = vsetq2(r{{[0-9]+}})
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0, <16 x i32> %a1) #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca <16 x i32>, align 64
+  %v2 = alloca <16 x i32>, align 64
+  store i32 %a0, i32* %v0, align 4
+  store <16 x i32> %a1, <16 x i32>* %v1, align 64
+  %v3 = load i32, i32* %v0, align 4
+  %v4 = load <16 x i32>, <16 x i32>* %v2, align 64
+  call void asm sideeffect "  $1 = vsetq2($0);\0A", "r,q"(i32 %v3, <16 x i32> %v4) #1
+  ret void
+}
+
+; Function Attrs: nounwind
+define i32 @f1() #0 {
+b0:
+  ret i32 0
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv62" "target-features"="+hvxv62,+hvx-length64b" }
+attributes #1 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/v6vassignp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vassignp.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6vassignp.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6vassignp.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,30 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Check that we generate vmems for W_equals_W (vassignp).
+; CHECK: vmem
+; CHECK: vmem
+; CHECK: vmem
+; CHECK: vmem
+
+target triple = "hexagon"
+
+ at g0 = common global [15 x <32 x i32>] zeroinitializer, align 64
+ at g1 = common global <32 x i32> zeroinitializer, align 64
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca i32, align 4
+  store i32 0, i32* %v0
+  store i32 0, i32* %v1, align 4
+  %v2 = load <32 x i32>, <32 x i32>* getelementptr inbounds ([15 x <32 x i32>], [15 x <32 x i32>]* @g0, i32 0, i32 0), align 64
+  %v3 = call <32 x i32> @llvm.hexagon.V6.vassignp(<32 x i32> %v2)
+  store <32 x i32> %v3, <32 x i32>* @g1, align 64
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vassignp(<32 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/v6vec-vmemcur-prob.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vec-vmemcur-prob.mir?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6vec-vmemcur-prob.mir (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6vec-vmemcur-prob.mir Mon Mar 12 07:01:28 2018
@@ -0,0 +1,20 @@
+# RUN: llc -march=hexagon -start-after if-converter %s -o - | FileCheck %s
+
+# Test that we do not generate a .cur, which would refer to a vector register
+# generated in a previous packet and used in the current packet.
+
+# CHECK-NOT: .cur
+
+---
+name: fred
+tracksRegLiveness: true
+
+body: |
+  bb.0:
+    liveins: $r0, $v0, $v1
+
+    $v2 = V6_vaddh $v0, $v1
+    $v0 = V6_vL32b_ai $r0, 0
+    J2_jumpr $r31, implicit-def $pc, implicit $v0
+...
+

Added: llvm/trunk/test/CodeGen/Hexagon/v6vec-vshuff.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vec-vshuff.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6vec-vshuff.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6vec-vshuff.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,32 @@
+; RUN: llc -march=hexagon -disable-hexagon-shuffle=1 -O2 < %s | FileCheck %s
+; Generate vshuff with the 3rd operand as an Rt8 register (restricted to r0-r7), e.g.:
+; v1:0=vshuff(v0,v1,r7)
+; CHECK: vshuff(v{{[0-9]+}},v{{[0-9]+}},r{{[0-7]}})
+
+target triple = "hexagon"
+
+ at g0 = common global [15 x <16 x i32>] zeroinitializer, align 64
+ at g1 = common global <32 x i32> zeroinitializer, align 128
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = load <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 0), align 64, !tbaa !0
+  %v1 = load <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 1), align 64, !tbaa !0
+  %v2 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %v0, <16 x i32> %v1, i32 -2147483648)
+  store <32 x i32> %v2, <32 x i32>* @g1, align 128, !tbaa !0
+  %v3 = tail call i32 bitcast (i32 (...)* @f1 to i32 (i32, <32 x i32>*)*)(i32 1024, <32 x i32>* @g1) #0
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32>, <16 x i32>, i32) #1
+
+declare i32 @f1(...) #0
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/v6vec_zero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vec_zero.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6vec_zero.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6vec_zero.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,21 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Test that we do not crash (ICE) with a "Cannot select" message when
+; generating a v16i32 constant pool node.
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i32 [ 0, %b1 ], [ 0, %b0 ]
+  store <16 x i32> zeroinitializer, <16 x i32>* null, align 64
+  br i1 false, label %b1, label %b2
+
+b2:                                               ; preds = %b1, %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }

Added: llvm/trunk/test/CodeGen/Hexagon/v6vect-dbl-fail1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vect-dbl-fail1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6vect-dbl-fail1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6vect-dbl-fail1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,45 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; CHECK: vmem
+; CHECK: vmem
+; CHECK-NOT:  r{{[0-9]*}} = add(r30,#-256)
+; CHECK: vmem
+; CHECK: vmem
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i8* %a0, i8* %a1, i32 %a2, i8* %a3, i32 %a4) #0 {
+b0:
+  %v0 = alloca i8*, align 4
+  %v1 = alloca i8*, align 4
+  %v2 = alloca i32, align 4
+  %v3 = alloca i8*, align 4
+  %v4 = alloca i32, align 4
+  %v5 = alloca <16 x i32>, align 64
+  %v6 = alloca <32 x i32>, align 128
+  store i8* %a0, i8** %v0, align 4
+  store i8* %a1, i8** %v1, align 4
+  store i32 %a2, i32* %v2, align 4
+  store i8* %a3, i8** %v3, align 4
+  store i32 %a4, i32* %v4, align 4
+  %v7 = load i8*, i8** %v0, align 4
+  %v8 = bitcast i8* %v7 to <16 x i32>*
+  %v9 = load <16 x i32>, <16 x i32>* %v8, align 64
+  %v10 = load i8*, i8** %v0, align 4
+  %v11 = getelementptr inbounds i8, i8* %v10, i32 64
+  %v12 = bitcast i8* %v11 to <16 x i32>*
+  %v13 = load <16 x i32>, <16 x i32>* %v12, align 64
+  %v14 = call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v9, <16 x i32> %v13)
+  store <32 x i32> %v14, <32 x i32>* %v6, align 128
+  %v15 = load i8*, i8** %v3, align 4
+  %v16 = bitcast i8* %v15 to <16 x i32>*
+  %v17 = load <16 x i32>, <16 x i32>* %v16, align 64
+  store <16 x i32> %v17, <16 x i32>* %v5, align 64
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/v6vect-dbl-spill.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vect-dbl-spill.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6vect-dbl-spill.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6vect-dbl-spill.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,151 @@
+; RUN: llc -march=hexagon -O3 < %s
+; REQUIRES: asserts
+
+; Test that we don't assert because the compiler generates the wrong register
+; class for the vector spill code in 128B mode.
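+;
+; For illustration only (not checked): HVX spill code stores and reloads the
+; vector through the stack, roughly
+;   vmem(r29+#N) = v0       // spill
+;   v0 = vmem(r29+#N)       // restore
+; and in 128B mode these accesses must be created with the 128-byte vector
+; register class, otherwise the assertion this test guards against fires.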
+
+define void @f0(i32 %a0) #0 {
+b0:
+  %v0 = tail call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 16843009)
+  %v1 = tail call <32 x i32> @llvm.hexagon.V6.vshuffh.128B(<32 x i32> undef)
+  %v2 = tail call <32 x i32> @llvm.hexagon.V6.vshuffh.128B(<32 x i32> zeroinitializer)
+  %v3 = sdiv i32 %a0, 128
+  %v4 = icmp sgt i32 %a0, 127
+  br i1 %v4, label %b1, label %b2
+
+b1:                                               ; preds = %b1, %b0
+  %v5 = phi i32 [ %v77, %b1 ], [ 0, %b0 ]
+  %v6 = phi <32 x i32>* [ undef, %b1 ], [ undef, %b0 ]
+  %v7 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> undef, <32 x i32> undef)
+  %v8 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v7, <32 x i32> zeroinitializer)
+  %v9 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v8, <32 x i32> undef, <32 x i32> %v0)
+  %v10 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> undef, <32 x i32> undef, i32 3)
+  %v11 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> zeroinitializer, <32 x i32> undef)
+  %v12 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v10, <32 x i32> undef)
+  %v13 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v11, <32 x i32> zeroinitializer)
+  %v14 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v12, <32 x i32> zeroinitializer)
+  %v15 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v13, <32 x i32> %v9, <32 x i32> %v0)
+  %v16 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v14, <32 x i32> %v15, <32 x i32> %v0)
+  %v17 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v16, <32 x i32> %v0)
+  %v18 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v17, <32 x i32> %v0)
+  %v19 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> undef, <32 x i32> zeroinitializer)
+  %v20 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v19, <32 x i32> %v18, <32 x i32> %v0)
+  %v21 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> undef, <32 x i32> zeroinitializer)
+  %v22 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> undef, <32 x i32> undef, <32 x i32> undef)
+  %v23 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %v21, <32 x i32> undef, <32 x i32> undef)
+  %v24 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v23, <32 x i32> %v22)
+  %v25 = tail call <64 x i32> @llvm.hexagon.V6.vmpabus.acc.128B(<64 x i32> zeroinitializer, <64 x i32> %v24, i32 16843009)
+  %v26 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v20, <32 x i32> %v0)
+  %v27 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v26, <32 x i32> %v0)
+  %v28 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v27, <32 x i32> %v0)
+  %v29 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v21, <32 x i32> %v28, <32 x i32> %v0)
+  %v30 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> undef, <32 x i32> zeroinitializer)
+  %v31 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> undef, <32 x i32> undef, <32 x i32> zeroinitializer)
+  %v32 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v31, <32 x i32> undef)
+  %v33 = tail call <64 x i32> @llvm.hexagon.V6.vmpabus.acc.128B(<64 x i32> %v25, <64 x i32> %v32, i32 16843009)
+  %v34 = tail call <64 x i32> @llvm.hexagon.V6.vmpabus.acc.128B(<64 x i32> %v33, <64 x i32> undef, i32 16843009)
+  %v35 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v29, <32 x i32> %v0)
+  %v36 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v35, <32 x i32> %v0)
+  %v37 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v36, <32 x i32> %v0)
+  %v38 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v30, <32 x i32> %v37, <32 x i32> %v0)
+  %v39 = load <32 x i32>, <32 x i32>* null, align 128, !tbaa !0
+  %v40 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> undef, <32 x i32> zeroinitializer)
+  %v41 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %v40, <32 x i32> undef, <32 x i32> %v39)
+  %v42 = tail call <64 x i32> @llvm.hexagon.V6.vmpybus.acc.128B(<64 x i32> %v34, <32 x i32> %v41, i32 16843009)
+  %v43 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v40, <32 x i32> %v38, <32 x i32> %v0)
+  %v44 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> %v39, <32 x i32> undef, i32 1)
+  %v45 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> undef, <32 x i32> %v39, i32 1)
+  %v46 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> undef, <32 x i32> %v39, i32 2)
+  %v47 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v44, <32 x i32> undef)
+  %v48 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v45, <32 x i32> undef)
+  %v49 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v46, <32 x i32> undef)
+  %v50 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v47, <32 x i32> zeroinitializer)
+  %v51 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v48, <32 x i32> zeroinitializer)
+  %v52 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v49, <32 x i32> zeroinitializer)
+  %v53 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %v52, <32 x i32> undef, <32 x i32> %v46)
+  %v54 = tail call <64 x i32> @llvm.hexagon.V6.vmpabus.acc.128B(<64 x i32> %v42, <64 x i32> undef, i32 16843009)
+  %v55 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v53, <32 x i32> undef)
+  %v56 = tail call <64 x i32> @llvm.hexagon.V6.vmpabus.acc.128B(<64 x i32> %v54, <64 x i32> %v55, i32 16843009)
+  %v57 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v50, <32 x i32> %v43, <32 x i32> %v0)
+  %v58 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v51, <32 x i32> %v57, <32 x i32> %v0)
+  %v59 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v58, <32 x i32> %v0)
+  %v60 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v52, <32 x i32> %v59, <32 x i32> %v0)
+  %v61 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> undef, <32 x i32> zeroinitializer)
+  %v62 = tail call <64 x i32> @llvm.hexagon.V6.vmpabus.acc.128B(<64 x i32> %v56, <64 x i32> undef, i32 16843009)
+  %v63 = tail call <64 x i32> @llvm.hexagon.V6.vmpabus.acc.128B(<64 x i32> %v62, <64 x i32> zeroinitializer, i32 16843009)
+  %v64 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v60, <32 x i32> %v0)
+  %v65 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v61, <32 x i32> %v64, <32 x i32> %v0)
+  %v66 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v65, <32 x i32> %v0)
+  %v67 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v66, <32 x i32> %v0)
+  %v68 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32> undef, <32 x i32> %v67, <32 x i32> %v1, i32 3)
+  %v69 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32> %v68, <32 x i32> %v67, <32 x i32> %v2, i32 4)
+  %v70 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32> %v69, <32 x i32> %v67, <32 x i32> %v2, i32 5)
+  %v71 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v63)
+  %v72 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v70)
+  %v73 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuhv.128B(<32 x i32> %v71, <32 x i32> %v72)
+  %v74 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v73)
+  %v75 = tail call <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32> %v74, <32 x i32> undef, i32 14)
+  %v76 = tail call <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32> %v75, <32 x i32> undef)
+  store <32 x i32> %v76, <32 x i32>* %v6, align 128, !tbaa !0
+  %v77 = add nsw i32 %v5, 1
+  %v78 = icmp slt i32 %v77, %v3
+  br i1 %v78, label %b1, label %b2
+
+b2:                                               ; preds = %b1, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vshuffh.128B(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1>, <32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vmpybus.acc.128B(<64 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1>, <32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vmpabus.acc.128B(<64 x i32>, <64 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32>, <32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vmpyuhv.128B(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32>, <32 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/v6vect-dbl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vect-dbl.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6vect-dbl.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6vect-dbl.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,178 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck --check-prefix=CHECKO0 %s
+; KP: Removed the -O2 check. The code has become more aggressively optimized
+; (some loads were found to be redundant and have been removed completely),
+; and verifying correct code generation has become more difficult than it is
+; worth.
+
+; CHECK: v{{[0-9]*}} = vsplat(r{{[0-9]*}})
+; CHECK: v{{[0-9]*}} = vsplat(r{{[0-9]*}})
+
+; CHECKO0: vmem(r{{[0-9]*}}+#0) = v{{[0-9]*}}
+; CHECKO0: v{{[0-9]*}} = vmem(r{{[0-9]*}}+#0)
+; CHECKO0: v{{[0-9]*}} = vmem(r{{[0-9]*}}+#0)
+
+; Allow .cur loads.
+; CHECKO2: v{{[0-9].*}} = vmem(r{{[0-9]*}}+#0)
+; CHECKO2: vmem(r{{[0-9]*}}+#0) = v{{[0-9]*}}
+; CHECKO2: v{{[0-9].*}} = vmem(r{{[0-9]*}}+#0)
+
+; CHECK: v{{[0-9]*}}:{{[0-9]*}} = vcombine(v{{[0-9]*}},v{{[0-9]*}})
+; CHECK: vmem(r{{[0-9]*}}+#0) = v{{[0-9]*}}
+; CHECK: vmem(r{{[0-9]*}}+#32) = v{{[0-9]*}}
+; CHECK: v{{[0-9]*}} = vmem(r{{[0-9]*}}+#0)
+; CHECK: v{{[0-9]*}} = vmem(r{{[0-9]*}}+#32)
+; CHECK: vmem(r{{[0-9]*}}+#0) = v{{[0-9]*}}
+; CHECK: vmem(r{{[0-9]*}}+#32) = v{{[0-9]*}}
+
+target triple = "hexagon"
+
+ at g0 = common global [10 x <32 x i32>] zeroinitializer, align 64
+ at g1 = private unnamed_addr constant [11 x i8] c"c[%d]= %x\0A\00", align 8
+ at g2 = common global [10 x <16 x i32>] zeroinitializer, align 64
+ at g3 = common global [10 x <16 x i32>] zeroinitializer, align 64
+ at g4 = common global [10 x <32 x i32>] zeroinitializer, align 64
+
+declare i32 @f0(i8*, ...)
+
+; Function Attrs: nounwind
+define void @f1(i32 %a0) #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca i32*, align 4
+  %v2 = alloca i32, align 4
+  store i32 %a0, i32* %v0, align 4
+  store i32* getelementptr inbounds ([10 x <32 x i32>], [10 x <32 x i32>]* @g0, i32 0, i32 0, i32 0), i32** %v1, align 4
+  %v3 = load i32, i32* %v0, align 4
+  %v4 = load i32*, i32** %v1, align 4
+  %v5 = getelementptr inbounds i32, i32* %v4, i32 %v3
+  store i32* %v5, i32** %v1, align 4
+  store i32 0, i32* %v2, align 4
+  br label %b1
+
+b1:                                               ; preds = %b3, %b0
+  %v6 = load i32, i32* %v2, align 4
+  %v7 = icmp slt i32 %v6, 16
+  br i1 %v7, label %b2, label %b4
+
+b2:                                               ; preds = %b1
+  %v8 = load i32, i32* %v2, align 4
+  %v9 = load i32*, i32** %v1, align 4
+  %v10 = getelementptr inbounds i32, i32* %v9, i32 1
+  store i32* %v10, i32** %v1, align 4
+  %v11 = load i32, i32* %v9, align 4
+  %v12 = call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @g1, i32 0, i32 0), i32 %v8, i32 %v11)
+  br label %b3
+
+b3:                                               ; preds = %b2
+  %v13 = load i32, i32* %v2, align 4
+  %v14 = add nsw i32 %v13, 1
+  store i32 %v14, i32* %v2, align 4
+  br label %b1
+
+b4:                                               ; preds = %b1
+  ret void
+}
+
+; Function Attrs: nounwind
+define i32 @f2() #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca i32, align 4
+  store i32 0, i32* %v0
+  store i32 0, i32* %v1, align 4
+  br label %b1
+
+b1:                                               ; preds = %b3, %b0
+  %v2 = load i32, i32* %v1, align 4
+  %v3 = icmp slt i32 %v2, 3
+  br i1 %v3, label %b2, label %b4
+
+b2:                                               ; preds = %b1
+  %v4 = load i32, i32* %v1, align 4
+  %v5 = add nsw i32 %v4, 1
+  %v6 = call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v5)
+  %v7 = load i32, i32* %v1, align 4
+  %v8 = getelementptr inbounds [10 x <16 x i32>], [10 x <16 x i32>]* @g2, i32 0, i32 %v7
+  store <16 x i32> %v6, <16 x i32>* %v8, align 64
+  %v9 = load i32, i32* %v1, align 4
+  %v10 = mul nsw i32 %v9, 10
+  %v11 = add nsw i32 %v10, 1
+  %v12 = call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v11)
+  %v13 = load i32, i32* %v1, align 4
+  %v14 = getelementptr inbounds [10 x <16 x i32>], [10 x <16 x i32>]* @g3, i32 0, i32 %v13
+  store <16 x i32> %v12, <16 x i32>* %v14, align 64
+  %v15 = load i32, i32* %v1, align 4
+  %v16 = getelementptr inbounds [10 x <16 x i32>], [10 x <16 x i32>]* @g2, i32 0, i32 %v15
+  %v17 = load <16 x i32>, <16 x i32>* %v16, align 64
+  %v18 = load i32, i32* %v1, align 4
+  %v19 = getelementptr inbounds [10 x <16 x i32>], [10 x <16 x i32>]* @g3, i32 0, i32 %v18
+  %v20 = load <16 x i32>, <16 x i32>* %v19, align 64
+  %v21 = call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v17, <16 x i32> %v20)
+  %v22 = load i32, i32* %v1, align 4
+  %v23 = getelementptr inbounds [10 x <32 x i32>], [10 x <32 x i32>]* @g4, i32 0, i32 %v22
+  store <32 x i32> %v21, <32 x i32>* %v23, align 64
+  br label %b3
+
+b3:                                               ; preds = %b2
+  %v24 = load i32, i32* %v1, align 4
+  %v25 = add nsw i32 %v24, 1
+  store i32 %v25, i32* %v1, align 4
+  br label %b1
+
+b4:                                               ; preds = %b1
+  store i32 0, i32* %v1, align 4
+  br label %b5
+
+b5:                                               ; preds = %b7, %b4
+  %v26 = load i32, i32* %v1, align 4
+  %v27 = icmp slt i32 %v26, 3
+  br i1 %v27, label %b6, label %b8
+
+b6:                                               ; preds = %b5
+  %v28 = load i32, i32* %v1, align 4
+  %v29 = getelementptr inbounds [10 x <32 x i32>], [10 x <32 x i32>]* @g4, i32 0, i32 %v28
+  %v30 = load <32 x i32>, <32 x i32>* %v29, align 64
+  %v31 = load i32, i32* %v1, align 4
+  %v32 = getelementptr inbounds [10 x <32 x i32>], [10 x <32 x i32>]* @g0, i32 0, i32 %v31
+  store <32 x i32> %v30, <32 x i32>* %v32, align 64
+  br label %b7
+
+b7:                                               ; preds = %b6
+  %v33 = load i32, i32* %v1, align 4
+  %v34 = add nsw i32 %v33, 1
+  store i32 %v34, i32* %v1, align 4
+  br label %b5
+
+b8:                                               ; preds = %b5
+  store i32 0, i32* %v1, align 4
+  br label %b9
+
+b9:                                               ; preds = %b11, %b8
+  %v35 = load i32, i32* %v1, align 4
+  %v36 = icmp slt i32 %v35, 3
+  br i1 %v36, label %b10, label %b12
+
+b10:                                              ; preds = %b9
+  %v37 = load i32, i32* %v1, align 4
+  %v38 = mul nsw i32 %v37, 16
+  call void @f1(i32 %v38)
+  br label %b11
+
+b11:                                              ; preds = %b10
+  %v39 = load i32, i32* %v1, align 4
+  %v40 = add nsw i32 %v39, 1
+  store i32 %v40, i32* %v1, align 4
+  br label %b9
+
+b12:                                              ; preds = %b9
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/v6vect-dh1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vect-dh1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6vect-dh1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6vect-dh1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,163 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O1 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+;
+; CHECK-NOT: v{{[0-9]*}}.cur
+;
+; CHECK: {
+; CHECK: v{{[0-9]+}}.h = vasr(v{{[0-9]+}}.w,v{{[0-9]+}}.w,r{{[0-7]+}})
+
+; CHECK: }
+; CHECK: {
+; CHECK: v{{[0-9]+}}.h = vasr(v{{[0-9]+}}.w,v{{[0-9]+}}.w,r{{[0-7]+}})
+; CHECK: }
+; CHECK-NOT: vand
+; CHECK: v{{[0-9]+}} = v{{[0-9]+}}
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i8* nocapture readonly %a0, i8* nocapture readonly %a1, i32 %a2, i8* nocapture %a3, i32 %a4) #0 {
+b0:
+  %v0 = bitcast i8* %a1 to i32*
+  %v1 = load i32, i32* %v0, align 4, !tbaa !0
+  %v2 = getelementptr inbounds i8, i8* %a1, i32 4
+  %v3 = bitcast i8* %v2 to i32*
+  %v4 = load i32, i32* %v3, align 4, !tbaa !0
+  %v5 = getelementptr inbounds i8, i8* %a1, i32 8
+  %v6 = bitcast i8* %v5 to i32*
+  %v7 = load i32, i32* %v6, align 4, !tbaa !0
+  %v8 = mul i32 %a4, 2
+  %v9 = add i32 %v8, %a4
+  %v10 = icmp sgt i32 %a4, 0
+  br i1 %v10, label %b1, label %b4
+
+b1:                                               ; preds = %b0
+  %v11 = getelementptr inbounds i8, i8* %a0, i32 %v9
+  %v12 = getelementptr inbounds i8, i8* %a0, i32 %v8
+  %v13 = getelementptr inbounds i8, i8* %a0, i32 %a4
+  %v14 = add i32 %v9, 64
+  %v15 = bitcast i8* %v11 to <16 x i32>*
+  %v16 = add i32 %v8, 64
+  %v17 = bitcast i8* %v12 to <16 x i32>*
+  %v18 = add i32 %a4, 64
+  %v19 = bitcast i8* %v13 to <16 x i32>*
+  %v20 = bitcast i8* %a0 to <16 x i32>*
+  %v21 = getelementptr inbounds i8, i8* %a0, i32 %v14
+  %v22 = load <16 x i32>, <16 x i32>* %v15, align 64, !tbaa !4
+  %v23 = getelementptr inbounds i8, i8* %a0, i32 %v16
+  %v24 = load <16 x i32>, <16 x i32>* %v17, align 64, !tbaa !4
+  %v25 = getelementptr inbounds i8, i8* %a0, i32 %v18
+  %v26 = load <16 x i32>, <16 x i32>* %v19, align 64, !tbaa !4
+  %v27 = load <16 x i32>, <16 x i32>* %v20, align 64, !tbaa !4
+  %v28 = getelementptr inbounds i8, i8* %a3, i32 %a4
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v29 = phi i8* [ %a0, %b1 ], [ %v40, %b2 ]
+  %v30 = phi i8* [ %a3, %b1 ], [ %v74, %b2 ]
+  %v31 = phi i8* [ %v25, %b1 ], [ %v45, %b2 ]
+  %v32 = phi i8* [ %v23, %b1 ], [ %v48, %b2 ]
+  %v33 = phi i8* [ %v21, %b1 ], [ %v51, %b2 ]
+  %v34 = phi i8* [ %v28, %b1 ], [ %v89, %b2 ]
+  %v35 = phi i32 [ 0, %b1 ], [ %v90, %b2 ]
+  %v36 = phi <16 x i32> [ %v27, %b1 ], [ %v42, %b2 ]
+  %v37 = phi <16 x i32> [ %v26, %b1 ], [ %v44, %b2 ]
+  %v38 = phi <16 x i32> [ %v24, %b1 ], [ %v47, %b2 ]
+  %v39 = phi <16 x i32> [ %v22, %b1 ], [ %v50, %b2 ]
+  %v40 = getelementptr inbounds i8, i8* %v29, i32 64
+  %v41 = bitcast i8* %v40 to <16 x i32>*
+  %v42 = load <16 x i32>, <16 x i32>* %v41, align 64, !tbaa !4
+  %v43 = bitcast i8* %v31 to <16 x i32>*
+  %v44 = load <16 x i32>, <16 x i32>* %v43, align 64, !tbaa !4
+  %v45 = getelementptr inbounds i8, i8* %v31, i32 64
+  %v46 = bitcast i8* %v32 to <16 x i32>*
+  %v47 = load <16 x i32>, <16 x i32>* %v46, align 64, !tbaa !4
+  %v48 = getelementptr inbounds i8, i8* %v32, i32 64
+  %v49 = bitcast i8* %v33 to <16 x i32>*
+  %v50 = load <16 x i32>, <16 x i32>* %v49, align 64, !tbaa !4
+  %v51 = getelementptr inbounds i8, i8* %v33, i32 64
+  %v52 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v42, <16 x i32> %v36, i32 4)
+  %v53 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v44, <16 x i32> %v37, i32 4)
+  %v54 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v47, <16 x i32> %v38, i32 4)
+  %v55 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v50, <16 x i32> %v39, i32 4)
+  %v56 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v52, <16 x i32> %v36)
+  %v57 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v53, <16 x i32> %v37)
+  %v58 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v54, <16 x i32> %v38)
+  %v59 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v55, <16 x i32> %v39)
+  %v60 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v56, i32 %v1, i32 0)
+  %v61 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v56, i32 %v1, i32 1)
+  %v62 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v60, <32 x i32> %v57, i32 %v4, i32 0)
+  %v63 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v61, <32 x i32> %v57, i32 %v4, i32 1)
+  %v64 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v62, <32 x i32> %v58, i32 %v7, i32 0)
+  %v65 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v63, <32 x i32> %v58, i32 %v7, i32 1)
+  %v66 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v65)
+  %v67 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v65)
+  %v68 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %v66, <16 x i32> %v67, i32 %a2)
+  %v69 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v64)
+  %v70 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v64)
+  %v71 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %v69, <16 x i32> %v70, i32 %a2)
+  %v72 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v68, <16 x i32> %v71)
+  %v73 = bitcast i8* %v30 to <16 x i32>*
+  store <16 x i32> %v72, <16 x i32>* %v73, align 64, !tbaa !4
+  %v74 = getelementptr inbounds i8, i8* %v30, i32 64
+  %v75 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v57, i32 %v1, i32 0)
+  %v76 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v57, i32 %v1, i32 1)
+  %v77 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v75, <32 x i32> %v58, i32 %v4, i32 0)
+  %v78 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v76, <32 x i32> %v58, i32 %v4, i32 1)
+  %v79 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v77, <32 x i32> %v59, i32 %v7, i32 0)
+  %v80 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v78, <32 x i32> %v59, i32 %v7, i32 1)
+  %v81 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v80)
+  %v82 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v80)
+  %v83 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %v81, <16 x i32> %v82, i32 %a2)
+  %v84 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v79)
+  %v85 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v79)
+  %v86 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %v84, <16 x i32> %v85, i32 %a2)
+  %v87 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v83, <16 x i32> %v86)
+  %v88 = bitcast i8* %v34 to <16 x i32>*
+  store <16 x i32> %v87, <16 x i32>* %v88, align 64, !tbaa !4
+  %v89 = getelementptr inbounds i8, i8* %v34, i32 64
+  %v90 = add nsw i32 %v35, 64
+  %v91 = icmp slt i32 %v90, %a4
+  br i1 %v91, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  br label %b4
+
+b4:                                               ; preds = %b3, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32>, i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32>, <32 x i32>, i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!2, !2, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/v6vect-locals1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vect-locals1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6vect-locals1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6vect-locals1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,41 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+
+; Check that the stack is aligned to 64 bytes.
+; CHECK: r{{[0-9]+}} = and(r{{[0-9]+}},#-64)
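+; and'ing with #-64 clears the low six bits of the pointer, rounding it down
+; to a 64-byte boundary (aligned = addr & ~63); for example,
+; 0x1009 & 0xffffffc0 = 0x1000.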
+
+target triple = "hexagon"
+
+%s.0 = type { i32, i32, i32, i32, i32 }
+
+ at g0 = private unnamed_addr constant [7 x i8] c"%x %x\0A\00", align 8
+ at g1 = global %s.0 { i32 11, i32 13, i32 15, i32 17, i32 19 }, align 4
+ at g2 = global <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>, align 64
+
+; Function Attrs: nounwind
+declare i32 @f0(i8* nocapture, ...) #0
+
+; Function Attrs: nounwind
+define void @f1(%s.0* byval %a0, <16 x i32> %a1) #0 {
+b0:
+  %v0 = alloca <16 x i32>, align 64
+  store <16 x i32> %a1, <16 x i32>* %v0, align 64, !tbaa !0
+  %v1 = ptrtoint %s.0* %a0 to i32
+  %v2 = ptrtoint <16 x i32>* %v0 to i32
+  %v3 = call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @g0, i32 0, i32 0), i32 %v1, i32 %v2) #0
+  ret void
+}
+
+; Function Attrs: nounwind
+define i32 @f2() #0 {
+b0:
+  %v0 = load <16 x i32>, <16 x i32>* @g2, align 64, !tbaa !0
+  tail call void @f1(%s.0* byval @g1, <16 x i32> %v0)
+  ret i32 0
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/v6vect-no-sideeffects.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vect-no-sideeffects.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6vect-no-sideeffects.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6vect-no-sideeffects.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,132 @@
+; RUN: llc -march=hexagon -enable-pipeliner=false < %s | FileCheck %s
+
+; Test that the vsplat and vmemu instructions are not all serialized due to
+; chain edges caused by the hasSideEffects flag. The exact code generation may
+; change due to scheduling changes, but we should not see a series of vsplat
+; and vmemu instructions that each occupy a packet by themselves.
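+;
+; For illustration only (not checked literally): the undesirable schedule
+; places each instruction in its own packet,
+;   { v0 = vsplat(r2) }
+;   { v1 = vsplat(r3) }
+; whereas without the spurious chain edges packets such as
+;   { v0 = vsplat(r2)
+;     v1 = vsplat(r3) }
+; are possible, which is what the back-to-back vsplat checks below rely on.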
+
+; CHECK: loop0(.LBB0_[[LOOP:.]],
+; CHECK: .LBB0_[[LOOP]]:
+; CHECK: vsplat
+; CHECK-NEXT: vsplat
+; CHECK: vsplat
+; CHECK-NEXT: vsplat
+; CHECK: endloop0
+
+ at g0 = global [256 x i8] c"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^00226644,,..**8888::66,,,,&&^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^22000022..4444>>::8888**..^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^<<66220000226644<<>>::^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^>><<446622000022>>", align 64
+
+; Function Attrs: nounwind
+define void @f0(i16** noalias nocapture readonly %a0, i16* noalias nocapture readonly %a1, i32* noalias nocapture %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6) #0 {
+b0:
+  %v0 = load <16 x i32>, <16 x i32>* bitcast ([256 x i8]* @g0 to <16 x i32>*), align 64, !tbaa !0
+  %v1 = load <16 x i32>, <16 x i32>* bitcast (i8* getelementptr inbounds ([256 x i8], [256 x i8]* @g0, i32 0, i32 64) to <16 x i32>*), align 64, !tbaa !0
+  %v2 = load <16 x i32>, <16 x i32>* bitcast (i8* getelementptr inbounds ([256 x i8], [256 x i8]* @g0, i32 0, i32 128) to <16 x i32>*), align 64, !tbaa !0
+  %v3 = load <16 x i32>, <16 x i32>* bitcast (i8* getelementptr inbounds ([256 x i8], [256 x i8]* @g0, i32 0, i32 192) to <16 x i32>*), align 64, !tbaa !0
+  %v4 = icmp sgt i32 %a5, 0
+  br i1 %v4, label %b1, label %b5
+
+b1:                                               ; preds = %b0
+  %v5 = bitcast i32* %a2 to <16 x i32>*
+  %v6 = tail call <16 x i32> @llvm.hexagon.V6.vd0()
+  %v7 = bitcast i16* %a1 to i64*
+  %v8 = mul nsw i32 %a3, 4
+  %v9 = add i32 %v8, %a6
+  %v10 = add i32 %v9, 32
+  %v11 = add i32 %a5, -1
+  br label %b2
+
+b2:                                               ; preds = %b4, %b1
+  %v12 = phi i32 [ 0, %b1 ], [ %v59, %b4 ]
+  %v13 = phi <16 x i32>* [ %v5, %b1 ], [ %v58, %b4 ]
+  %v14 = getelementptr i16*, i16** %a0, i32 %v12
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v15 = phi i16** [ %v14, %b2 ], [ %v57, %b3 ]
+  %v16 = phi i32 [ 0, %b2 ], [ %v55, %b3 ]
+  %v17 = phi i64* [ %v7, %b2 ], [ %v23, %b3 ]
+  %v18 = phi <16 x i32> [ %v6, %b2 ], [ %v54, %b3 ]
+  %v19 = load i16*, i16** %v15, align 4, !tbaa !3
+  %v20 = getelementptr inbounds i16, i16* %v19, i32 %v9
+  %v21 = getelementptr inbounds i64, i64* %v17, i32 1
+  %v22 = load i64, i64* %v17, align 8, !tbaa !0
+  %v23 = getelementptr inbounds i64, i64* %v17, i32 2
+  %v24 = load i64, i64* %v21, align 8, !tbaa !0
+  %v25 = trunc i64 %v22 to i32
+  %v26 = lshr i64 %v22, 32
+  %v27 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v25)
+  %v28 = trunc i64 %v26 to i32
+  %v29 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v28)
+  %v30 = trunc i64 %v24 to i32
+  %v31 = lshr i64 %v24, 32
+  %v32 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v30)
+  %v33 = trunc i64 %v31 to i32
+  %v34 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v33)
+  %v35 = bitcast i16* %v20 to <16 x i32>*
+  %v36 = load <16 x i32>, <16 x i32>* %v35, align 4, !tbaa !0
+  %v37 = getelementptr inbounds i16, i16* %v19, i32 %v10
+  %v38 = bitcast i16* %v37 to <16 x i32>*
+  %v39 = load <16 x i32>, <16 x i32>* %v38, align 4, !tbaa !0
+  %v40 = tail call <16 x i32> @llvm.hexagon.V6.vpackeh(<16 x i32> %v39, <16 x i32> %v36)
+  %v41 = tail call <16 x i32> @llvm.hexagon.V6.vpackeh(<16 x i32> %v40, <16 x i32> %v40)
+  %v42 = tail call <16 x i32> @llvm.hexagon.V6.vdelta(<16 x i32> %v41, <16 x i32> %v0)
+  %v43 = tail call <16 x i32> @llvm.hexagon.V6.vdelta(<16 x i32> %v41, <16 x i32> %v1)
+  %v44 = tail call <16 x i32> @llvm.hexagon.V6.vdelta(<16 x i32> %v41, <16 x i32> %v2)
+  %v45 = tail call <16 x i32> @llvm.hexagon.V6.vdelta(<16 x i32> %v41, <16 x i32> %v3)
+  %v46 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v27, <16 x i32> %v42)
+  %v47 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v29, <16 x i32> %v43)
+  %v48 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v32, <16 x i32> %v44)
+  %v49 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v34, <16 x i32> %v45)
+  %v50 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhvsat(<16 x i32> %v46, <16 x i32> %v46)
+  %v51 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhvsat.acc(<16 x i32> %v50, <16 x i32> %v47, <16 x i32> %v47)
+  %v52 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhvsat.acc(<16 x i32> %v51, <16 x i32> %v48, <16 x i32> %v48)
+  %v53 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhvsat.acc(<16 x i32> %v52, <16 x i32> %v49, <16 x i32> %v49)
+  %v54 = tail call <16 x i32> @llvm.hexagon.V6.vasrw.acc(<16 x i32> %v18, <16 x i32> %v53, i32 6)
+  %v55 = add nsw i32 %v16, 1
+  %v56 = icmp eq i32 %v16, 7
+  %v57 = getelementptr i16*, i16** %v15, i32 1
+  br i1 %v56, label %b4, label %b3
+
+b4:                                               ; preds = %b3
+  %v58 = getelementptr inbounds <16 x i32>, <16 x i32>* %v13, i32 1
+  store <16 x i32> %v54, <16 x i32>* %v13, align 64, !tbaa !0
+  %v59 = add nsw i32 %v12, 1
+  %v60 = icmp eq i32 %v12, %v11
+  br i1 %v60, label %b5, label %b2
+
+b5:                                               ; preds = %b4, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vd0() #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vpackeh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vdelta(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vdmpyhvsat(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vdmpyhvsat.acc(<16 x i32>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vasrw.acc(<16 x i32>, <16 x i32>, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}
+!3 = !{!4, !4, i64 0}
+!4 = !{!"any pointer", !1, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/v6vect-pred2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vect-pred2.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6vect-pred2.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6vect-pred2.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,39 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; CHECK-DAG: v{{[0-9]+}} = vsplat(r{{[0-9]+}})
+; CHECK-DAG: v{{[0-9]+}} = vsplat(r{{[0-9]+}})
+; CHECK-DAG: q{{[0-3]}} = vand(v{{[0-9]+}},r{{[0-9]+}})
+; CHECK: v{{[0-9]+}} = vmux(q{{[0-3]}},v{{[0-9]+}},v{{[0-9]+}})
+
+target triple = "hexagon"
+
+ at g0 = common global <16 x i32> zeroinitializer, align 64
+ at g1 = common global <16 x i32> zeroinitializer, align 64
+ at g2 = common global <16 x i32> zeroinitializer, align 64
+ at g3 = common global <16 x i32> zeroinitializer, align 64
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 11)
+  store <16 x i32> %v0, <16 x i32>* @g1, align 64, !tbaa !0
+  %v1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 12)
+  store <16 x i32> %v1, <16 x i32>* @g2, align 64, !tbaa !0
+  %v2 = load <16 x i32>, <16 x i32>* @g0, align 64, !tbaa !0
+  %v3 = bitcast <16 x i32> %v2 to <512 x i1>
+  %v4 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v3, <16 x i32> %v0, <16 x i32> %v1)
+  store <16 x i32> %v4, <16 x i32>* @g3, align 64, !tbaa !0
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/v6vect-spill-kill.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vect-spill-kill.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6vect-spill-kill.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6vect-spill-kill.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,137 @@
+; RUN: llc -march=hexagon -O3 < %s
+; REQUIRES: asserts
+
+; Test that we don't assert due to requiring too many scavenger spill slots.
+; This happened because the kill flag wasn't added to the appropriate operands
+; of the spill code.
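+; (Roughly: without kill flags on the spill instructions the spilled vector
+; registers still appear live to the register scavenger, which then tries to
+; reserve more emergency spill slots than are available.)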
+
+define void @f0(i32 %a0, i8* noalias nocapture %a1) #0 {
+b0:
+  %v0 = tail call <32 x i32> @llvm.hexagon.V6.vshuffh.128B(<32 x i32> undef)
+  %v1 = sdiv i32 %a0, 128
+  %v2 = icmp sgt i32 %a0, 127
+  br i1 %v2, label %b1, label %b3
+
+b1:                                               ; preds = %b0
+  %v3 = bitcast i8* %a1 to <32 x i32>*
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v4 = phi <32 x i32>* [ %v3, %b1 ], [ undef, %b2 ]
+  %v5 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> undef, <32 x i32> zeroinitializer, i32 2)
+  %v6 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v5, <32 x i32> zeroinitializer)
+  %v7 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer)
+  %v8 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v7, <32 x i32> zeroinitializer)
+  %v9 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> zeroinitializer, <32 x i32> %v8, <32 x i32> zeroinitializer)
+  %v10 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v9, <32 x i32> zeroinitializer)
+  %v11 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> undef, <32 x i32> zeroinitializer, i32 4)
+  %v12 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v11, <32 x i32> zeroinitializer)
+  %v13 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> zeroinitializer, <32 x i32> zeroinitializer)
+  %v14 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> zeroinitializer, <32 x i32> undef)
+  %v15 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v12, <32 x i32> undef)
+  %v16 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v13, <32 x i32> undef)
+  %v17 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v10, <32 x i32> zeroinitializer)
+  %v18 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v14, <32 x i32> %v17, <32 x i32> zeroinitializer)
+  %v19 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v15, <32 x i32> %v18, <32 x i32> zeroinitializer)
+  %v20 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v16, <32 x i32> %v19, <32 x i32> zeroinitializer)
+  %v21 = getelementptr inbounds i8, i8* null, i32 undef
+  %v22 = bitcast i8* %v21 to <32 x i32>*
+  %v23 = load <32 x i32>, <32 x i32>* %v22, align 128, !tbaa !0
+  %v24 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v23, <32 x i32> zeroinitializer)
+  %v25 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v24, <32 x i32> undef)
+  %v26 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v25, <32 x i32> %v20, <32 x i32> zeroinitializer)
+  %v27 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v26, <32 x i32> zeroinitializer)
+  %v28 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v27, <32 x i32> zeroinitializer)
+  %v29 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v28, <32 x i32> zeroinitializer)
+  %v30 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v29, <32 x i32> zeroinitializer)
+  %v31 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v30, <32 x i32> zeroinitializer)
+  %v32 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v31, <32 x i32> zeroinitializer)
+  %v33 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v32, <32 x i32> zeroinitializer)
+  %v34 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v33, <32 x i32> zeroinitializer)
+  %v35 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v34, <32 x i32> zeroinitializer)
+  %v36 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> undef, <32 x i32> undef, i32 1)
+  %v37 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> undef, <32 x i32> undef, i32 1)
+  %v38 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> undef, <32 x i32> undef, i32 2)
+  %v39 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v36, <32 x i32> zeroinitializer)
+  %v40 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v37, <32 x i32> zeroinitializer)
+  %v41 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v38, <32 x i32> zeroinitializer)
+  %v42 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v39, <32 x i32> undef)
+  %v43 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v40, <32 x i32> undef)
+  %v44 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v41, <32 x i32> undef)
+  %v45 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> undef, <32 x i32> undef)
+  %v46 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v42, <32 x i32> %v35, <32 x i32> zeroinitializer)
+  %v47 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v43, <32 x i32> %v46, <32 x i32> zeroinitializer)
+  %v48 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v44, <32 x i32> %v47, <32 x i32> zeroinitializer)
+  %v49 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v45, <32 x i32> %v48, <32 x i32> zeroinitializer)
+  %v50 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> undef, <32 x i32> undef, i32 4)
+  %v51 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> undef, <32 x i32> undef, i32 4)
+  %v52 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> undef, <32 x i32> zeroinitializer)
+  %v53 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v50, <32 x i32> zeroinitializer)
+  %v54 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v51, <32 x i32> zeroinitializer)
+  %v55 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v52, <32 x i32> undef)
+  %v56 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v53, <32 x i32> undef)
+  %v57 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v54, <32 x i32> undef)
+  %v58 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v49, <32 x i32> zeroinitializer)
+  %v59 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v55, <32 x i32> %v58, <32 x i32> zeroinitializer)
+  %v60 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v56, <32 x i32> %v59, <32 x i32> zeroinitializer)
+  %v61 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v57, <32 x i32> %v60, <32 x i32> zeroinitializer)
+  %v62 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32> zeroinitializer, <32 x i32> %v61, <32 x i32> undef, i32 5)
+  %v63 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuhv.128B(<32 x i32> undef, <32 x i32> undef)
+  %v64 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v62)
+  %v65 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuhv.128B(<32 x i32> zeroinitializer, <32 x i32> %v64)
+  %v66 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v63)
+  %v67 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v63)
+  %v68 = tail call <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32> %v66, <32 x i32> %v67, i32 14)
+  %v69 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v65)
+  %v70 = tail call <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32> %v69, <32 x i32> undef, i32 14)
+  %v71 = tail call <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32> %v70, <32 x i32> %v68)
+  store <32 x i32> %v71, <32 x i32>* %v4, align 128, !tbaa !0
+  %v72 = icmp slt i32 0, %v1
+  br i1 %v72, label %b2, label %b3
+
+b3:                                               ; preds = %b2, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vshuffh.128B(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1>, <32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32>, <32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vmpyuhv.128B(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vshuffeb.128B(<32 x i32>, <32 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/v6vect-vmem1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vect-vmem1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6vect-vmem1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6vect-vmem1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,24 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; CHECK: vmem(r{{[0-9]*}}+#1) =
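+; The #1 offset is in units of the vector length (64 bytes with
+; hvx-length64b), so vmem(r..+#1) addresses the second half of the
+; <32 x i32> vector pair produced by vunpackh below.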
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define i32 @f0(<16 x i32>* %a0, <32 x i32>* %a1) #0 {
+b0:
+  %v0 = call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
+  store <16 x i32> %v0, <16 x i32>* %a0, align 64
+  %v1 = load <16 x i32>, <16 x i32>* %a0, align 64
+  %v2 = call <32 x i32> @llvm.hexagon.V6.vunpackh(<16 x i32> %v1)
+  store <32 x i32> %v2, <32 x i32>* %a1, align 64
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vunpackh(<16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/v6vect-vsplat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vect-vsplat.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/v6vect-vsplat.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/v6vect-vsplat.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,64 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+; CHECK-NOT: vsplat
+; CHECK: call f2
+; CHECK: v{{[0-9]+}} = vsplat
+; CHECK: v{{[0-9]+}} = vsplat
+; CHECK: v{{[0-9]+}} = vsplat
+; CHECK: v{{[0-9]+}} = vsplat
+
+target triple = "hexagon"
+
+ at g0 = common global [2 x <32 x i32>] zeroinitializer, align 128
+ at g1 = common global <32 x i32> zeroinitializer, align 128
+ at g2 = common global [2 x <16 x i32>] zeroinitializer, align 64
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  tail call void @f1() #2
+  %v0 = tail call i32 @f2(i8 zeroext 0) #2
+  %v1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1) #2
+  store <16 x i32> %v1, <16 x i32>* getelementptr inbounds ([2 x <16 x i32>], [2 x <16 x i32>]* @g2, i32 0, i32 0), align 64, !tbaa !0
+  %v2 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 2) #2
+  store <16 x i32> %v2, <16 x i32>* getelementptr inbounds ([2 x <16 x i32>], [2 x <16 x i32>]* @g2, i32 0, i32 1), align 64, !tbaa !0
+  %v3 = tail call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %v1, <16 x i32> %v2) #2
+  store <32 x i32> %v3, <32 x i32>* getelementptr inbounds ([2 x <32 x i32>], [2 x <32 x i32>]* @g0, i32 0, i32 0), align 128, !tbaa !0
+  store <32 x i32> %v3, <32 x i32>* getelementptr inbounds ([2 x <32 x i32>], [2 x <32 x i32>]* @g0, i32 0, i32 1), align 128, !tbaa !0
+  %v4 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v3, <32 x i32> %v3, i32 -2147483648)
+  store <32 x i32> %v4, <32 x i32>* @g1, align 128, !tbaa !0
+  ret i32 0
+}
+
+declare void @f1() #0
+
+declare i32 @f2(i8 zeroext) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind
+define void @f3() #0 {
+b0:
+  %v0 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
+  store <16 x i32> %v0, <16 x i32>* getelementptr inbounds ([2 x <16 x i32>], [2 x <16 x i32>]* @g2, i32 0, i32 0), align 64, !tbaa !0
+  %v1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 2)
+  store <16 x i32> %v1, <16 x i32>* getelementptr inbounds ([2 x <16 x i32>], [2 x <16 x i32>]* @g2, i32 0, i32 1), align 64, !tbaa !0
+  %v2 = tail call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %v0, <16 x i32> %v1)
+  store <32 x i32> %v2, <32 x i32>* getelementptr inbounds ([2 x <32 x i32>], [2 x <32 x i32>]* @g0, i32 0, i32 0), align 128, !tbaa !0
+  store <32 x i32> %v2, <32 x i32>* getelementptr inbounds ([2 x <32 x i32>], [2 x <32 x i32>]* @g0, i32 0, i32 1), align 128, !tbaa !0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/vadd1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vadd1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vadd1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vadd1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,28 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: v{{[0-9]*}}.w = vadd
+
+target triple = "hexagon"
+
+ at g0 = common global <16 x i32> zeroinitializer, align 64
+ at g1 = common global <16 x i32> zeroinitializer, align 64
+ at g2 = common global <16 x i32> zeroinitializer, align 64
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  %v0 = load <16 x i32>, <16 x i32>* @g0, align 32, !tbaa !0
+  %v1 = load <16 x i32>, <16 x i32>* @g1, align 32, !tbaa !0
+  %v2 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v0, <16 x i32> %v1)
+  store <16 x i32> %v2, <16 x i32>* @g2, align 64, !tbaa !0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/varargs-memv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/varargs-memv.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/varargs-memv.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/varargs-memv.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,32 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+; Check that llc does not crash.
+
+ at g0 = private unnamed_addr constant [7 x i8] c"%d\09\09%d\00", align 1
+ at g1 = common global <4 x i32> zeroinitializer, align 16
+
+declare i32 @f0(...)
+
+; Function Attrs: nounwind
+define i32 @f1() #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca i32, align 4
+  %v2 = alloca [0 x <4 x i32>], align 16
+  store i32 0, i32* %v0
+  store i32 0, i32* %v1, align 4
+  %v3 = bitcast [0 x <4 x i32>]* %v2 to i8*
+  call void @llvm.memset.p0i8.i32(i8* align 16 %v3, i8 0, i32 0, i1 false)
+  %v4 = load i32, i32* %v1, align 4
+  %v5 = add nsw i32 %v4, 1
+  store i32 %v5, i32* %v1, align 4
+  %v6 = load <4 x i32>, <4 x i32>* @g1, align 16
+  %v7 = call i32 bitcast (i32 (...)* @f0 to i32 (i8*, i32, <4 x i32>)*)(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @g0, i32 0, i32 0), i32 %v5, <4 x i32> %v6)
+  ret i32 0
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
+attributes #1 = { argmemonly nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/vasrh.select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vasrh.select.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vasrh.select.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vasrh.select.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,33 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+
+; Check that we do not get a 'cannot select' error and that a vasrh
+; instruction is generated instead.
+; CHECK: vasrh
+
+target triple = "hexagon"
+
+ at g0 = global [6 x i64] [i64 0, i64 1, i64 10000, i64 -9223372036854775808, i64 9223372036854775807, i64 -1], align 8
+ at g1 = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = load i64, i64* getelementptr inbounds ([6 x i64], [6 x i64]* @g0, i32 0, i32 0), align 8, !tbaa !0
+  %v1 = tail call i64 @llvm.hexagon.S2.asr.i.vh(i64 %v0, i32 62)
+  %v2 = trunc i64 %v1 to i32
+  store i32 %v2, i32* @g1, align 4, !tbaa !4
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.asr.i.vh(i64, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"long long", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"int", !2, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/vcombine128_to_req_seq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vcombine128_to_req_seq.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vcombine128_to_req_seq.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vcombine128_to_req_seq.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,81 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+
+; Test that we convert 128B vcombine instructions to REG_SEQUENCE instructions.
+
+; CHECK-LABEL: f0:
+; CHECK-NOT: vcombine
+define void @f0(i8* nocapture readonly %a0, i8* nocapture readonly %a1, i32 %a2, i8* nocapture %a3, i32 %a4, i32 %a5) #0 {
+b0:
+  %v0 = bitcast i8* %a1 to i64*
+  %v1 = load i64, i64* %v0, align 8
+  %v2 = shl i64 %v1, 8
+  %v3 = trunc i64 %v2 to i32
+  %v4 = trunc i64 %v1 to i32
+  %v5 = and i32 %v4, 16777215
+  %v6 = bitcast i8* %a0 to <32 x i32>*
+  %v7 = load <32 x i32>, <32 x i32>* %v6, align 128
+  %v8 = getelementptr inbounds i8, i8* %a0, i32 32
+  %v9 = bitcast i8* %v8 to <32 x i32>*
+  %v10 = load <32 x i32>, <32 x i32>* %v9, align 128
+  %v11 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v10, <32 x i32> %v7)
+  %v12 = tail call <64 x i32> @llvm.hexagon.V6.vrmpybusi.128B(<64 x i32> %v11, i32 %v5, i32 0)
+  %v13 = tail call <64 x i32> @llvm.hexagon.V6.vrmpybusi.128B(<64 x i32> %v11, i32 %v3, i32 0)
+  %v14 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v12)
+  %v15 = tail call <32 x i32> @llvm.hexagon.V6.vasrwuhsat.128B(<32 x i32> %v14, <32 x i32> %v14, i32 %a2)
+  %v16 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v13)
+  %v17 = tail call <32 x i32> @llvm.hexagon.V6.vasrwuhsat.128B(<32 x i32> %v16, <32 x i32> %v16, i32 %a2)
+  %v18 = getelementptr inbounds i8, i8* %a3, i32 32
+  %v19 = bitcast i8* %v18 to <32 x i32>*
+  store <32 x i32> %v15, <32 x i32>* %v19, align 128
+  %v20 = bitcast i8* %a3 to <32 x i32>*
+  store <32 x i32> %v17, <32 x i32>* %v20, align 128
+  ret void
+}
+
+; CHECK-LABEL: f1:
+; CHECK-NOT: vcombine
+define void @f1() #0 {
+b0:
+  br i1 undef, label %b1, label %b3
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi <64 x i32> [ %v6, %b1 ], [ undef, %b0 ]
+  %v1 = tail call <64 x i32> @llvm.hexagon.V6.vmpybus.acc.128B(<64 x i32> %v0, <32 x i32> undef, i32 16843009)
+  %v2 = tail call <64 x i32> @llvm.hexagon.V6.vmpabus.acc.128B(<64 x i32> %v1, <64 x i32> undef, i32 16843009)
+  %v3 = tail call <64 x i32> @llvm.hexagon.V6.vmpabus.acc.128B(<64 x i32> %v2, <64 x i32> undef, i32 16843009)
+  %v4 = tail call <64 x i32> @llvm.hexagon.V6.vmpabus.acc.128B(<64 x i32> %v3, <64 x i32> undef, i32 16843009)
+  %v5 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> undef, <32 x i32> undef)
+  %v6 = tail call <64 x i32> @llvm.hexagon.V6.vmpabus.acc.128B(<64 x i32> %v4, <64 x i32> %v5, i32 16843009)
+  br i1 false, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  %v7 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v6)
+  unreachable
+
+b3:                                               ; preds = %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vrmpybusi.128B(<64 x i32>, i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vasrwuhsat.128B(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vmpybus.acc.128B(<64 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vmpabus.acc.128B(<64 x i32>, <64 x i32>, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/vcombine_subreg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vcombine_subreg.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vcombine_subreg.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vcombine_subreg.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,28 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+target triple = "hexagon"
+
+ at g0 = common global <16 x i32> zeroinitializer, align 64
+ at g1 = common global <32 x i32> zeroinitializer, align 128
+ at g2 = common global <32 x i32> zeroinitializer, align 128
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  %v0 = load <16 x i32>, <16 x i32>* @g0, align 64
+  %v1 = load <32 x i32>, <32 x i32>* @g1, align 128
+  %v2 = call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v1)
+  %v3 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v0, <16 x i32> %v2)
+  store <32 x i32> %v3, <32 x i32>* @g2, align 128
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/vcombine_to_req_seq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vcombine_to_req_seq.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vcombine_to_req_seq.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vcombine_to_req_seq.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,48 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; CHECK-NOT: vcombine
+
+define void @f0(i8* nocapture readonly %a0, i8* nocapture readonly %a1, i32 %a2, i8* nocapture %a3, i32 %a4, i32 %a5) #0 {
+b0:
+  %v0 = bitcast i8* %a1 to i64*
+  %v1 = load i64, i64* %v0, align 8
+  %v2 = shl i64 %v1, 8
+  %v3 = trunc i64 %v2 to i32
+  %v4 = trunc i64 %v1 to i32
+  %v5 = and i32 %v4, 16777215
+  %v6 = bitcast i8* %a0 to <16 x i32>*
+  %v7 = load <16 x i32>, <16 x i32>* %v6, align 64
+  %v8 = getelementptr inbounds i8, i8* %a0, i32 32
+  %v9 = bitcast i8* %v8 to <16 x i32>*
+  %v10 = load <16 x i32>, <16 x i32>* %v9, align 64
+  %v11 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v10, <16 x i32> %v7)
+  %v12 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v11, i32 %v5, i32 0)
+  %v13 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v11, i32 %v3, i32 0)
+  %v14 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v12)
+  %v15 = tail call <16 x i32> @llvm.hexagon.V6.vasrwuhsat(<16 x i32> %v14, <16 x i32> %v14, i32 %a2)
+  %v16 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v13)
+  %v17 = tail call <16 x i32> @llvm.hexagon.V6.vasrwuhsat(<16 x i32> %v16, <16 x i32> %v16, i32 %a2)
+  %v18 = getelementptr inbounds i8, i8* %a3, i32 32
+  %v19 = bitcast i8* %v18 to <16 x i32>*
+  store <16 x i32> %v15, <16 x i32>* %v19, align 64
+  %v20 = bitcast i8* %a3 to <16 x i32>*
+  store <16 x i32> %v17, <16 x i32>* %v20, align 64
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32>, i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vasrwuhsat(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/vec-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vec-align.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vec-align.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vec-align.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,46 @@
+; RUN: llc -O0 -march=hexagon < %s | FileCheck %s
+
+; Make sure we generate stack-realignment code for the vector locals.
+; CHECK: [[REG1:r[0-9]*]] = and(r29,#-64)
+; CHECK: = add([[REG1]],#128)
+; CHECK: = add([[REG1]],#64)
+; Make sure we do not generate another add(r29,#-64).
+; CHECK: vmem(
+; CHECK-NOT: r{{[0-9]*}} = add(r29,#-64)
+
+target triple = "hexagon"
+
+ at g0 = common global <16 x i32> zeroinitializer, align 64
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca <16 x i32>, align 64
+  %v2 = alloca <16 x i32>, align 64
+  store i32 0, i32* %v0
+  %v3 = call i32 @f1(i8 zeroext 0)
+  %v4 = call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
+  store <16 x i32> %v4, <16 x i32>* %v1, align 64
+  %v5 = call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 12)
+  store <16 x i32> %v5, <16 x i32>* %v2, align 64
+  %v6 = load <16 x i32>, <16 x i32>* %v1, align 64
+  %v7 = load <16 x i32>, <16 x i32>* %v2, align 64
+  %v8 = call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v6, <16 x i32> %v7)
+  store <16 x i32> %v8, <16 x i32>* @g0, align 64
+  call void bitcast (void (...)* @f2 to void ()*)()
+  ret i32 0
+}
+
+declare i32 @f1(i8 zeroext) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32>, <16 x i32>) #1
+
+declare void @f2(...) #0
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
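
A hedged C sketch of what this test exercises (same assumed typedef and
builtin-name convention as the sketch after vadd1.ll): two 64-byte vector
locals plus calls force the prologue to realign the stack with and(r29,#-64).

  typedef int HVX_Vector __attribute__((__vector_size__(64)));

  HVX_Vector g0;
  int  f1(unsigned char c);
  void f2(void);

  int f0(void) {
    f1(0);
    /* At -O0 both locals get 64-byte-aligned stack slots. */
    HVX_Vector a = __builtin_HEXAGON_V6_lvsplatw(1);
    HVX_Vector b = __builtin_HEXAGON_V6_lvsplatw(12);
    g0 = __builtin_HEXAGON_V6_vaddw(a, b);
    f2();
    return 0;
  }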

Added: llvm/trunk/test/CodeGen/Hexagon/vec-call-full1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vec-call-full1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vec-call-full1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vec-call-full1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,19 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+
+; CHECK-DAG: v{{[0-9]+}} = vmem(r{{[0-9]+}}+#0)
+; CHECK-DAG: v{{[0-9]+}} = vmem(r{{[0-9]+}}+#0)
+; CHECK-DAG: vmem(r{{[0-9]+}}+#{{[0-1]}}) = v{{[0-9]+}}
+; CHECK-DAG: vmem(r{{[0-9]+}}+#{{[0-1]}}) = v{{[0-9]+}}
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(<32 x i32> %a0, <32 x i32> %a1, <32 x i32> %a2, <32 x i32> %a3, <32 x i32> %a4, <32 x i32> %a5, <32 x i32> %a6, <32 x i32> %a7, <32 x i32> %a8, <32 x i32> %a9, <32 x i32> %a10, <32 x i32> %a11, <32 x i32> %a12, <32 x i32> %a13, <32 x i32> %a14, <32 x i32> %a15, <32 x i32> %a16, <32 x i32> %a17) #0 {
+b0:
+  tail call void @f1(<32 x i32> %a1, <32 x i32> %a2, <32 x i32> %a3, <32 x i32> %a4, <32 x i32> %a5, <32 x i32> %a6, <32 x i32> %a7, <32 x i32> %a8, <32 x i32> %a9, <32 x i32> %a10, <32 x i32> %a11, <32 x i32> %a12, <32 x i32> %a13, <32 x i32> %a14, <32 x i32> %a15, <32 x i32> %a16, <32 x i32> %a17, <32 x i32> %a0) #0
+  ret void
+}
+
+declare void @f1(<32 x i32>, <32 x i32>, <32 x i32>, <32 x i32>, <32 x i32>, <32 x i32>, <32 x i32>, <32 x i32>, <32 x i32>, <32 x i32>, <32 x i32>, <32 x i32>, <32 x i32>, <32 x i32>, <32 x i32>, <32 x i32>, <32 x i32>, <32 x i32>) #0
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }

Added: llvm/trunk/test/CodeGen/Hexagon/vecPred2Vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vecPred2Vec.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vecPred2Vec.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vecPred2Vec.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,34 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; CHECK: q{{[0-3]}} = vand(v{{[0-9]+}},r{{[0-9]+}})
+; CHECK: q{{[0-3]}} = vand(v{{[0-9]+}},r{{[0-9]+}})
+; CHECK: q{{[0-3]}} = and(q{{[0-3]}},q{{[0-3]}})
+
+target triple = "hexagon"
+
+ at g0 = common global <16 x i32> zeroinitializer, align 64
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
+  %v1 = bitcast <16 x i32> %v0 to <512 x i1>
+  %v2 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 2)
+  %v3 = bitcast <16 x i32> %v2 to <512 x i1>
+  %v4 = tail call <512 x i1> @llvm.hexagon.V6.pred.and(<512 x i1> %v1, <512 x i1> %v3)
+  %v5 = bitcast <512 x i1> %v4 to <16 x i32>
+  store <16 x i32> %v5, <16 x i32>* @g0, align 64, !tbaa !0
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.hexagon.V6.pred.and(<512 x i1>, <512 x i1>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/vect-any_extend.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vect-any_extend.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vect-any_extend.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vect-any_extend.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,16 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+; Used to fail with "Cannot select: 0x17300f0: v2i32 = any_extend"
+
+target triple = "hexagon-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  %v0 = load <4 x i8>, <4 x i8>* undef, align 8
+  %v1 = zext <4 x i8> %v0 to <4 x i32>
+  store <4 x i32> %v1, <4 x i32>* undef, align 8
+  unreachable
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/vect-dbl-post-inc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vect-dbl-post-inc.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vect-dbl-post-inc.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vect-dbl-post-inc.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,40 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that we generate post-increment memory operations when using
+; double HVX (128B) vectors.
+
+; CHECK: = vmem(r{{[0-9]+}}++#1)
+; CHECK: vmem(r{{[0-9]+}}++#1)
+
+; Function Attrs: nounwind
+define void @f0(i8* noalias nocapture readonly %a0, i8* noalias nocapture %a1, i32 %a2) #0 {
+b0:
+  %v0 = icmp sgt i32 %a2, 0
+  br i1 %v0, label %b1, label %b3
+
+b1:                                               ; preds = %b0
+  %v1 = bitcast i8* %a0 to <32 x i32>*
+  %v2 = bitcast i8* %a1 to <32 x i32>*
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v3 = phi <32 x i32>* [ %v9, %b2 ], [ %v1, %b1 ]
+  %v4 = phi <32 x i32>* [ %v10, %b2 ], [ %v2, %b1 ]
+  %v5 = phi i32 [ %v7, %b2 ], [ 0, %b1 ]
+  %v6 = load <32 x i32>, <32 x i32>* %v3, align 128, !tbaa !0
+  store <32 x i32> %v6, <32 x i32>* %v4, align 128, !tbaa !0
+  %v7 = add nsw i32 %v5, 1
+  %v8 = icmp eq i32 %v7, %a2
+  %v9 = getelementptr <32 x i32>, <32 x i32>* %v3, i32 1
+  %v10 = getelementptr <32 x i32>, <32 x i32>* %v4, i32 1
+  br i1 %v8, label %b3, label %b2
+
+b3:                                               ; preds = %b2, %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}
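
The loop above is essentially a copy of n 128-byte vectors; a hedged C
equivalent (typedef assumed) is:

  typedef int HVX_Vector __attribute__((__vector_size__(128)));

  /* The loads and stores should use post-incremented addressing,
     vmem(r#++#1), rather than a separate pointer update. */
  void f0(const HVX_Vector *restrict src, HVX_Vector *restrict dst, int n) {
    for (int i = 0; i < n; ++i)
      dst[i] = src[i];
  }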

Added: llvm/trunk/test/CodeGen/Hexagon/vect-set_cc_v2i32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vect-set_cc_v2i32.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vect-set_cc_v2i32.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vect-set_cc_v2i32.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,179 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: vcmp{{.*}}
+
+target triple = "hexagon"
+
+%s.0 = type { i16, i16, i16, [4 x i8*], i32, i32, i32, %s.1*, %s.3, i16, i16, i16, i16, i16, %s.4 }
+%s.1 = type { %s.1*, %s.2* }
+%s.2 = type { i16, i16 }
+%s.3 = type { i32, i16*, i16*, i32* }
+%s.4 = type { i8 }
+
+ at g0 = private unnamed_addr constant [7 x i8] c"Static\00", align 1
+ at g1 = private unnamed_addr constant [5 x i8] c"Heap\00", align 1
+ at g2 = private unnamed_addr constant [6 x i8] c"Stack\00", align 1
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0, i8** nocapture %a1) #0 {
+b0:
+  %v0 = alloca [1 x %s.0], align 8
+  %v1 = call i32 @f1(i32 5) #0
+  %v2 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 6
+  %v3 = icmp eq i32 %v1, 0
+  %v4 = select i1 %v3, i32 7, i32 %v1
+  store i32 %v4, i32* %v2, align 8, !tbaa !0
+  %v5 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 0
+  %v6 = bitcast [1 x %s.0]* %v0 to i32*
+  %v7 = load i32, i32* %v6, align 8
+  %v8 = trunc i32 %v7 to i16
+  %v9 = icmp eq i16 %v8, 0
+  br i1 %v9, label %b1, label %b4
+
+b1:                                               ; preds = %b0
+  %v10 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 1
+  %v11 = icmp ult i32 %v7, 65536
+  br i1 %v11, label %b2, label %b4
+
+b2:                                               ; preds = %b1
+  %v12 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 2
+  %v13 = load i16, i16* %v12, align 4, !tbaa !4
+  %v14 = icmp eq i16 %v13, 0
+  br i1 %v14, label %b3, label %b4
+
+b3:                                               ; preds = %b2
+  store i16 0, i16* %v5, align 8, !tbaa !4
+  store i16 0, i16* %v10, align 2, !tbaa !4
+  store i16 102, i16* %v12, align 4, !tbaa !4
+  br label %b4
+
+b4:                                               ; preds = %b3, %b2, %b1, %b0
+  %v15 = phi i16 [ 0, %b3 ], [ 0, %b2 ], [ 0, %b1 ], [ %v8, %b0 ]
+  %v16 = insertelement <1 x i32> undef, i32 %v4, i32 0
+  %v17 = shufflevector <1 x i32> %v16, <1 x i32> undef, <2 x i32> zeroinitializer
+  %v18 = and <2 x i32> %v17, <i32 1, i32 2>
+  %v19 = icmp ne <2 x i32> %v18, zeroinitializer
+  %v20 = zext <2 x i1> %v19 to <2 x i16>
+  %v21 = extractelement <2 x i16> %v20, i32 0
+  %v22 = extractelement <2 x i16> %v20, i32 1
+  %v23 = add i16 %v21, %v22
+  %v24 = lshr i32 %v4, 2
+  %v25 = trunc i32 %v24 to i16
+  %v26 = and i16 %v25, 1
+  %v27 = add i16 %v23, %v26
+  %v28 = getelementptr [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 4
+  %v29 = load i32, i32* %v28, align 8
+  %v30 = zext i16 %v27 to i32
+  %v31 = udiv i32 %v29, %v30
+  store i32 %v31, i32* %v28, align 8
+  %v32 = getelementptr [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 3, i32 0
+  %v33 = and i32 %v4, 1
+  %v34 = icmp eq i32 %v33, 0
+  br i1 %v34, label %b5, label %b12
+
+b5:                                               ; preds = %b12, %b4
+  %v35 = phi i16 [ 0, %b4 ], [ 1, %b12 ]
+  %v36 = and i32 %v4, 2
+  %v37 = icmp eq i32 %v36, 0
+  br i1 %v37, label %b14, label %b13
+
+b6:                                               ; preds = %b16
+  %v38 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 3, i32 1
+  %v39 = load i8*, i8** %v38, align 4, !tbaa !6
+  %v40 = bitcast i8* %v39 to %s.1*
+  %v41 = call %s.1* @f2(i32 %v31, %s.1* %v40, i16 signext %v15) #0
+  %v42 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 7
+  store %s.1* %v41, %s.1** %v42, align 4, !tbaa !6
+  %v43 = load i32, i32* %v2, align 8, !tbaa !0
+  br label %b7
+
+b7:                                               ; preds = %b16, %b6
+  %v44 = phi i32 [ %v4, %b16 ], [ %v43, %b6 ]
+  %v45 = and i32 %v44, 2
+  %v46 = icmp eq i32 %v45, 0
+  br i1 %v46, label %b9, label %b8
+
+b8:                                               ; preds = %b7
+  %v47 = load i32, i32* %v28, align 8, !tbaa !0
+  %v48 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 3, i32 2
+  %v49 = load i8*, i8** %v48, align 8, !tbaa !6
+  %v50 = load i32, i32* %v6, align 8
+  %v51 = shl i32 %v50, 16
+  %v52 = ashr exact i32 %v51, 16
+  %v53 = and i32 %v50, -65536
+  %v54 = or i32 %v53, %v52
+  %v55 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 8
+  %v56 = call i32 @f3(i32 %v47, i8* %v49, i32 %v54, %s.3* %v55) #0
+  %v57 = load i32, i32* %v2, align 8, !tbaa !0
+  br label %b9
+
+b9:                                               ; preds = %b8, %b7
+  %v58 = phi i32 [ %v44, %b7 ], [ %v57, %b8 ]
+  %v59 = and i32 %v58, 4
+  %v60 = icmp eq i32 %v59, 0
+  br i1 %v60, label %b11, label %b10
+
+b10:                                              ; preds = %b9
+  %v61 = load i32, i32* %v28, align 8, !tbaa !0
+  %v62 = load i16, i16* %v5, align 8, !tbaa !4
+  %v63 = getelementptr inbounds [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 3, i32 3
+  %v64 = load i8*, i8** %v63, align 4, !tbaa !6
+  call void @f4(i32 %v61, i16 signext %v62, i8* %v64) #0
+  br label %b11
+
+b11:                                              ; preds = %b10, %b9
+  ret i32 0
+
+b12:                                              ; preds = %b4
+  %v65 = getelementptr [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 3, i32 1
+  %v66 = load i8*, i8** %v32, align 8
+  store i8* %v66, i8** %v65, align 4
+  br label %b5
+
+b13:                                              ; preds = %b5
+  %v67 = getelementptr [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 3, i32 2
+  %v68 = load i8*, i8** %v32, align 8
+  %v69 = zext i16 %v35 to i32
+  %v70 = sub i32 0, %v69
+  %v71 = and i32 %v31, %v70
+  %v72 = getelementptr inbounds i8, i8* %v68, i32 %v71
+  store i8* %v72, i8** %v67, align 8
+  %v73 = add i16 %v35, 1
+  br label %b14
+
+b14:                                              ; preds = %b13, %b5
+  %v74 = phi i16 [ %v35, %b5 ], [ %v73, %b13 ]
+  %v75 = and i32 %v4, 4
+  %v76 = icmp eq i32 %v75, 0
+  br i1 %v76, label %b16, label %b15
+
+b15:                                              ; preds = %b14
+  %v77 = getelementptr [1 x %s.0], [1 x %s.0]* %v0, i32 0, i32 0, i32 3, i32 3
+  %v78 = load i8*, i8** %v32, align 8
+  %v79 = zext i16 %v74 to i32
+  %v80 = mul i32 %v31, %v79
+  %v81 = getelementptr inbounds i8, i8* %v78, i32 %v80
+  store i8* %v81, i8** %v77, align 4
+  br label %b16
+
+b16:                                              ; preds = %b15, %b14
+  br i1 %v34, label %b7, label %b6
+}
+
+declare i32 @f1(i32) #0
+
+declare %s.1* @f2(i32, %s.1*, i16 signext) #0
+
+declare i32 @f3(i32, i8*, i32, %s.3*) #0
+
+declare void @f4(i32, i16 signext, i8*) #0
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"short", !2}
+!6 = !{!7, !7, i64 0}
+!7 = !{!"any pointer", !2}

Added: llvm/trunk/test/CodeGen/Hexagon/vect-vd0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vect-vd0.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vect-vd0.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vect-vd0.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,22 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Verify __builtin_HEXAGON_V6_vd0 maps to vxor
+; CHECK: v{{[0-9]*}} = vxor(v{{[0-9]*}},v{{[0-9]*}})
+
+ at g0 = common global <16 x i32> zeroinitializer, align 64
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0) #0 {
+b0:
+  %v0 = alloca i32, align 4
+  store i32 %a0, i32* %v0, align 4
+  %v1 = call <16 x i32> @llvm.hexagon.V6.vd0()
+  store <16 x i32> %v1, <16 x i32>* @g0, align 64
+  ret i32 ptrtoint (<16 x i32>* @g0 to i32)
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vd0() #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
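
Since the comment in the test names the builtin, here is a hedged C sketch of
the corresponding source (typedef assumed):

  typedef int HVX_Vector __attribute__((__vector_size__(64)));

  HVX_Vector g0;

  int f0(int a0) {
    /* V6_vd0 yields an all-zero vector; the backend is expected to
       materialize it as vxor(v,v) rather than loading a constant. */
    g0 = __builtin_HEXAGON_V6_vd0();
    return (int)&g0;
  }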

Added: llvm/trunk/test/CodeGen/Hexagon/vect-zero_extend.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vect-zero_extend.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vect-zero_extend.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vect-zero_extend.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,25 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+; Used to fail with "Cannot select: 0x16cb2d0: v4i16 = zero_extend"
+
+target triple = "hexagon-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b1, label %b3
+
+b1:                                               ; preds = %b0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v0 = load <3 x i8>, <3 x i8>* undef, align 8
+  %v1 = zext <3 x i8> %v0 to <3 x i16>
+  store <3 x i16> %v1, <3 x i16>* undef, align 8
+  br label %b2
+
+b3:                                               ; preds = %b0
+  ret void
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/vect_setcc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vect_setcc.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vect_setcc.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vect_setcc.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,92 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; REQUIRES: asserts
+; CHECK: f0:
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind readonly
+define void @f0(i16* nocapture %a0) #0 {
+b0:
+  %v0 = alloca [16 x i16], align 8
+  %v1 = load i16, i16* %a0, align 2, !tbaa !0
+  %v2 = getelementptr [16 x i16], [16 x i16]* %v0, i32 0, i32 5
+  br label %b12
+
+b1:                                               ; preds = %b11
+  %v3 = icmp slt i16 %v1, 46
+  br i1 %v3, label %b3, label %b2
+
+b2:                                               ; preds = %b1
+  br label %b5
+
+b3:                                               ; preds = %b1
+  br label %b4
+
+b4:                                               ; preds = %b4, %b3
+  %v4 = phi i32 [ %v6, %b4 ], [ 0, %b3 ]
+  %v5 = getelementptr inbounds [16 x i16], [16 x i16]* %v0, i32 0, i32 %v4
+  store i16 1, i16* %v5, align 2, !tbaa !0
+  %v6 = add nsw i32 %v4, 1
+  %v7 = icmp eq i32 %v6, 16
+  br i1 %v7, label %b8, label %b4
+
+b5:                                               ; preds = %b7, %b2
+  %v8 = phi i32 [ %v12, %b7 ], [ 0, %b2 ]
+  %v9 = getelementptr inbounds [16 x i16], [16 x i16]* %v0, i32 0, i32 %v8
+  %v10 = load i16, i16* %v9, align 2, !tbaa !0
+  %v11 = icmp slt i16 %v10, 13
+  br i1 %v11, label %b6, label %b7
+
+b6:                                               ; preds = %b5
+  store i16 1, i16* %v9, align 2, !tbaa !0
+  br label %b7
+
+b7:                                               ; preds = %b6, %b5
+  %v12 = add nsw i32 %v8, 1
+  %v13 = icmp eq i32 %v12, 16
+  br i1 %v13, label %b9, label %b5
+
+b8:                                               ; preds = %b4
+  br label %b10
+
+b9:                                               ; preds = %b7
+  br label %b10
+
+b10:                                              ; preds = %b11, %b9, %b8
+  ret void
+
+b11:                                              ; preds = %b12
+  %v14 = add <2 x i32> %v31, %v32
+  %v15 = extractelement <2 x i32> %v14, i32 0
+  %v16 = extractelement <2 x i32> %v14, i32 1
+  %v17 = add i32 %v16, %v15
+  %v18 = icmp eq i32 %v17, 1
+  br i1 %v18, label %b1, label %b10
+
+b12:                                              ; preds = %b12, %b0
+  %v19 = phi <2 x i32> [ zeroinitializer, %b0 ], [ %v31, %b12 ]
+  %v20 = phi <2 x i32> [ zeroinitializer, %b0 ], [ %v32, %b12 ]
+  %v21 = phi i16* [ %v2, %b0 ], [ %v35, %b12 ]
+  %v22 = phi i32 [ 0, %b0 ], [ %v33, %b12 ]
+  %v23 = bitcast i16* %v21 to <4 x i16>*
+  %v24 = load <4 x i16>, <4 x i16>* %v23, align 2
+  %v25 = icmp sgt <4 x i16> %v24, <i16 11, i16 11, i16 11, i16 11>
+  %v26 = zext <4 x i1> %v25 to <4 x i16>
+  %v27 = shufflevector <4 x i16> %v26, <4 x i16> undef, <2 x i32> <i32 2, i32 3>
+  %v28 = shufflevector <4 x i16> %v26, <4 x i16> undef, <2 x i32> <i32 0, i32 1>
+  %v29 = zext <2 x i16> %v28 to <2 x i32>
+  %v30 = zext <2 x i16> %v27 to <2 x i32>
+  %v31 = add <2 x i32> %v19, %v29
+  %v32 = add <2 x i32> %v20, %v30
+  %v33 = add nsw i32 %v22, 4
+  %v34 = icmp slt i32 %v22, 4
+  %v35 = getelementptr i16, i16* %v21, i32 4
+  br i1 %v34, label %b12, label %b11
+}
+
+attributes #0 = { nounwind readonly "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/vect_setcc_v2i16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vect_setcc_v2i16.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vect_setcc_v2i16.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vect_setcc_v2i16.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,106 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; REQUIRES: asserts
+; CHECK: f0
+
+target triple = "hexagon"
+
+ at g0 = internal unnamed_addr global [24 x i16] zeroinitializer, align 8
+
+; Function Attrs: nounwind
+define void @f0(i16* nocapture %a0) #0 {
+b0:
+  %v0 = alloca [128 x i16], align 8
+  %v1 = alloca [16 x i16], align 8
+  %v2 = bitcast [128 x i16]* %v0 to i8*
+  call void @llvm.lifetime.start.p0i8(i64 256, i8* %v2) #2
+  %v3 = getelementptr [128 x i16], [128 x i16]* %v0, i32 0, i32 80
+  br label %b8
+
+b1:                                               ; preds = %b3
+  br label %b2
+
+b2:                                               ; preds = %b4, %b1
+  call void @llvm.lifetime.end.p0i8(i64 256, i8* %v2) #2
+  ret void
+
+b3:                                               ; preds = %b5, %b3
+  %v4 = phi i16* [ %v26, %b5 ], [ %v9, %b3 ]
+  %v5 = phi i32 [ 0, %b5 ], [ %v7, %b3 ]
+  %v6 = bitcast i16* %v4 to <4 x i16>*
+  store <4 x i16> <i16 1, i16 1, i16 1, i16 1>, <4 x i16>* %v6, align 8
+  %v7 = add nsw i32 %v5, 4
+  %v8 = icmp slt i32 %v5, 12
+  %v9 = getelementptr i16, i16* %v4, i32 4
+  br i1 %v8, label %b3, label %b1
+
+b4:                                               ; preds = %b6
+  %v10 = getelementptr [16 x i16], [16 x i16]* %v1, i32 0, i32 13
+  %v11 = bitcast i16* %v10 to <2 x i16>*
+  %v12 = load <2 x i16>, <2 x i16>* %v11, align 2
+  %v13 = icmp sgt <2 x i16> %v12, <i16 11, i16 11>
+  %v14 = zext <2 x i1> %v13 to <2 x i32>
+  %v15 = add <2 x i32> %v39, %v14
+  %v16 = add <2 x i32> %v15, %v40
+  %v17 = extractelement <2 x i32> %v16, i32 0
+  %v18 = extractelement <2 x i32> %v16, i32 1
+  %v19 = getelementptr [16 x i16], [16 x i16]* %v1, i32 0, i32 15
+  %v20 = load i16, i16* %v19, align 2
+  %v21 = icmp sgt i16 %v20, 11
+  %v22 = zext i1 %v21 to i32
+  %v23 = add i32 %v18, %v22
+  %v24 = add i32 %v23, %v17
+  %v25 = icmp slt i32 %v24, 5
+  br i1 %v25, label %b5, label %b2
+
+b5:                                               ; preds = %b4
+  %v26 = getelementptr [16 x i16], [16 x i16]* %v1, i32 0, i32 0
+  br label %b3
+
+b6:                                               ; preds = %b7, %b6
+  %v27 = phi <2 x i32> [ zeroinitializer, %b7 ], [ %v40, %b6 ]
+  %v28 = phi <2 x i32> [ zeroinitializer, %b7 ], [ %v39, %b6 ]
+  %v29 = phi i16* [ %v44, %b7 ], [ %v43, %b6 ]
+  %v30 = phi i32 [ 0, %b7 ], [ %v41, %b6 ]
+  %v31 = bitcast i16* %v29 to <4 x i16>*
+  %v32 = load <4 x i16>, <4 x i16>* %v31, align 2
+  %v33 = icmp sgt <4 x i16> %v32, <i16 11, i16 11, i16 11, i16 11>
+  %v34 = zext <4 x i1> %v33 to <4 x i16>
+  %v35 = shufflevector <4 x i16> %v34, <4 x i16> undef, <2 x i32> <i32 2, i32 3>
+  %v36 = shufflevector <4 x i16> %v34, <4 x i16> undef, <2 x i32> <i32 0, i32 1>
+  %v37 = zext <2 x i16> %v36 to <2 x i32>
+  %v38 = zext <2 x i16> %v35 to <2 x i32>
+  %v39 = add <2 x i32> %v28, %v37
+  %v40 = add <2 x i32> %v27, %v38
+  %v41 = add nsw i32 %v30, 4
+  %v42 = icmp slt i32 %v30, 4
+  %v43 = getelementptr i16, i16* %v29, i32 4
+  br i1 %v42, label %b6, label %b4
+
+b7:                                               ; preds = %b8
+  %v44 = getelementptr [16 x i16], [16 x i16]* %v1, i32 0, i32 5
+  br label %b6
+
+b8:                                               ; preds = %b8, %b0
+  %v45 = phi i16* [ %v3, %b0 ], [ %v53, %b8 ]
+  %v46 = phi i16* [ getelementptr inbounds ([24 x i16], [24 x i16]* @g0, i32 0, i32 0), %b0 ], [ %v54, %b8 ]
+  %v47 = phi i32 [ 0, %b0 ], [ %v51, %b8 ]
+  %v48 = bitcast i16* %v45 to <4 x i16>*
+  %v49 = load <4 x i16>, <4 x i16>* %v48, align 8
+  %v50 = bitcast i16* %v46 to <4 x i16>*
+  store <4 x i16> %v49, <4 x i16>* %v50, align 8
+  %v51 = add nsw i32 %v47, 4
+  %v52 = icmp slt i32 %v47, 20
+  %v53 = getelementptr i16, i16* %v45, i32 4
+  %v54 = getelementptr i16, i16* %v46, i32 4
+  br i1 %v52, label %b8, label %b7
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { argmemonly nounwind }
+attributes #2 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/verify-sink-code.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/verify-sink-code.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/verify-sink-code.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/verify-sink-code.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,164 @@
+; RUN: llc -O3 -march=hexagon -verify-machineinstrs < %s
+; REQUIRES: asserts
+; Check for successful compilation.
+
+target triple = "hexagon"
+
+%s.0 = type { %s.1, [128 x %s.0*], i32, i32, i32, %s.6, i32, i32, i32, i32, i32, i32, i32, i32, i32, [1 x %s.9], %s.9*, [1 x %s.12], %s.12*, i32, [4 x [4 x [4 x i32]]*], [2 x [8 x [8 x i32]]*], [4 x [16 x i32]*], [2 x [64 x i32]*], [4 x [16 x i16]*], [2 x [64 x i16]*], [4 x [16 x i16]*], [2 x [64 x i16]*], [2 x [64 x i32]], [2 x [64 x i32]], [2 x i32], %s.13, %s.15, %s.16, %s.17*, %s.17*, i32, [19 x %s.17*], i32, [19 x %s.17*], [2 x i32], [4 x i8], %s.18, %s.20, %s.23*, %s.24, [7 x void (i8*)*], [7 x void (i8*)*], [12 x void (i8*, i8*)*], [12 x void (i8*)*], %s.26, %s.27, %s.28, %s.29, %s.30, %s.32, %s.33, [5 x %s.34*], %s.34*, [15 x %s.34*], [3 x %s.34*], [7 x %s.34*], [8 x i8] }
+%s.1 = type { i32, i32, i32, i32, i32, i32, i32, i32, %s.2, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, [16 x i8], [16 x i8], [16 x i8], [16 x i8], [64 x i8], [64 x i8], void (i8*, i32, i8*, i8*)*, i8*, i32, i32, %s.3, %s.4, i32, i32, i32 }
+%s.2 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+%s.3 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x i32], i32, i32 }
+%s.4 = type { i32, i32, i32, i32, i32, i32, float, float, i32, i32, float, float, float, i32, i8*, i32, i8*, i8*, float, float, float, %s.5*, i32, i8* }
+%s.5 = type { i32, i32, i32, i32, float }
+%s.6 = type { i32, [8 x %s.7], i32, i8*, %s.8, i32 }
+%s.7 = type { i32, i32, i32, i8* }
+%s.8 = type { i8*, i8*, i8*, i32, i32 }
+%s.9 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [256 x i32], i32, i32, i32, i32, i32, i32, i32, i32, %s.10, i32, %s.11, i32 }
+%s.10 = type { i32, i32, i32, i32 }
+%s.11 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+%s.12 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [6 x i8*] }
+%s.13 = type { %s.9*, %s.12*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x i32], i32, i32, i32, i32, i32, i32, i32, [2 x [16 x %s.14]], i32, i32, i32, i32, i32, i32, i32, i32 }
+%s.14 = type { i32, i32 }
+%s.15 = type { [460 x i8], i32, i32, i32, i32, i32, i8*, i8*, i8* }
+%s.16 = type { [19 x %s.17*], [19 x %s.17*], [292 x %s.17*], %s.17*, [18 x %s.17*], i32, i32, i32, i32, i32, i32, i32 }
+%s.17 = type { i32, i32, i32, i64, i32, i32, i32, float, i32, [4 x i32], [4 x i32], i32, i32, [4 x i8*], [4 x i8*], [4 x i8*], i16*, [8 x i8*], [4 x i8*], i8*, [2 x [2 x i16]*], [2 x i8*], [2 x i32], [2 x [16 x i32]], [18 x [18 x i32]], i32, [18 x i32], [18 x [18 x i32*]], i32*, i32*, i32*, i32, i32, i32, i32 }
+%s.18 = type { [16 x i32], [2 x [4 x i32]], [4 x [64 x i32]], [24 x %s.19] }
+%s.19 = type { [16 x i32] }
+%s.20 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x i32], [2 x i32], [2 x i32], [2 x i32], [2 x i32], [2 x i32], i32, [4 x i32], [16 x i32], i32, i32, i32, i32, i32, i32, i8*, i8*, i16*, [7 x i8]*, [24 x i8]*, i8*, [2 x [2 x i16]*], [2 x [2 x i16]*], [2 x i8*], [2 x [32 x [2 x i16]*]], i8*, i8*, [2 x [3 x i8*]], [16 x i8]*, i32, i32, [4 x i32], i32, i32, i32, i32, i32, [8 x i8], %s.21, %s.22, i32, i32, i32, i32, i32, i32, i32, i32, [16 x [2 x i32]], [32 x [4 x i32]], [2 x i32], [16 x i32], [4 x i8] }
+%s.21 = type { [384 x i8], [864 x i8], [3 x i8*], [3 x i8*], [2 x i32], [2 x [32 x [6 x i8*]]], [2 x [16 x i16*]], [3 x i32], [4 x i8] }
+%s.22 = type { [48 x i32], [48 x i32], [2 x [48 x i8]], [2 x [48 x [2 x i16]]], [2 x [48 x [2 x i16]]], [48 x i8], [2 x [48 x [2 x i16]]], [2 x [48 x i8]], [2 x i32], i32, i32, i32 }
+%s.23 = type opaque
+%s.24 = type { %s.25, [5 x i32], [5 x i64], [5 x i32], [5 x i64], [5 x float], [5 x float], [5 x float], [5 x float], [5 x float], [5 x [19 x i64]], [2 x i64], [2 x [7 x i64]], [2 x [32 x i64]], [2 x i32], [2 x i32] }
+%s.25 = type { i32, i32, i32, i32, [19 x i32], i32, i32, i32, [2 x i32], [7 x i32], [32 x i32], i32, i32, i32, [2 x i32] }
+%s.26 = type { [7 x i32 (i8*, i32, i8*, i32)*], [7 x i32 (i8*, i32, i8*, i32)*], [7 x i32 (i8*, i32, i8*, i32)*], [7 x i32 (i8*, i32, i8*, i32)*], [4 x i32 (i8*, i32, i8*, i32)*], [7 x i32 (i8*, i32, i8*, i32)*], [7 x i32 (i8*, i32, i8*, i32)*], void (i8*, i32, i8*, i32, [4 x i32]*)*, float ([4 x i32]*, [4 x i32]*, i32)*, [7 x i32 (i8*, i32, i8*, i32, i32)*], [7 x void (i8*, i8*, i8*, i8*, i32, i32*)*], [7 x void (i8*, i8*, i8*, i8*, i8*, i32, i32*)*], [7 x void (i32*, i16*, i32, i16*, i32)*], void (i8*, i8*, i32*)*, void (i8*, i8*, i32*)*, void (i8*, i8*, i32*)*, void (i8*, i8*, i32*)* }
+%s.27 = type { void (i8**, i32, i8*, i32, i32, i32, i32, i32)*, i8* (i8**, i32, i8*, i32*, i32, i32, i32, i32)*, void (i8*, i32, i8*, i32, i32, i32, i32, i32)*, [10 x void (i8*, i32, i8*, i32)*], [10 x void (i8*, i32, i8*, i32, i32)*], [7 x void (i8*, i32, i8*, i32, i32)*], void (i8*, i32, i8*, i32, i32, i32)*, void (i8*, i32, i8*, i32, i32)*, void (i8*, i32, i32)* }
+%s.28 = type { void ([4 x i16]*, i8*, i8*)*, void (i8*, [4 x i16]*)*, void ([4 x [4 x i16]]*, i8*, i8*)*, void (i8*, [4 x [4 x i16]]*)*, void ([4 x [4 x i16]]*, i8*, i8*)*, void (i8*, [4 x [4 x i16]]*)*, void ([8 x i16]*, i8*, i8*)*, void (i8*, [8 x i16]*)*, void ([8 x [8 x i16]]*, i8*, i8*)*, void (i8*, [8 x [8 x i16]]*)*, void ([4 x i16]*)*, void ([4 x i16]*)*, void ([2 x i16]*)*, void ([2 x i16]*)* }
+%s.29 = type { void (i32*, [8 x i16]*)*, void (i32*, [4 x i16]*)*, void (i32*, [4 x i16]*)*, void (i32*, i8*, i8*)*, void (i32*, i8*, i8*)* }
+%s.30 = type { [9 x void (%s.27*, %s.17*, %s.31*, i32, i32)*] }
+%s.31 = type { i32, i32, [4 x i32], [4 x i8*] }
+%s.32 = type { void ([8 x i16]*, i16*, i16*)*, void ([4 x i16]*, i16*, i16*)*, void ([4 x i16]*, i32, i32)*, void ([2 x i16]*, i32, i32)*, void ([4 x i16]*, [4 x [4 x i32]]*, i32)*, void ([8 x i16]*, [8 x [8 x i32]]*, i32)* }
+%s.33 = type { void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32)*, void (i8*, i32, i32, i32)*, void (i8*, i32, i32, i32)*, void (i8*, i32, i32, i32)* }
+%s.34 = type opaque
+
+ at g0 = private unnamed_addr constant [148 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00", align 8
+ at g1 = private unnamed_addr constant [27 x i8] c"yyyyyyyyyyyyyyyyyyyyyyyyyy\00", align 8
+ at g2 = private unnamed_addr constant [148 x i8] c"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\00", align 8
+
+; Function Attrs: nounwind
+define void @f0(%s.0* %a0, i32 %a1, i32 %a2) #0 {
+b0:
+  %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 1
+  %v1 = load i32, i32* %v0, align 4
+  %v2 = mul nsw i32 %v1, %a2
+  %v3 = add nsw i32 %v2, %a1
+  %v4 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 3
+  %v5 = load i32, i32* %v4, align 4
+  %v6 = mul nsw i32 %v5, %a2
+  %v7 = add nsw i32 %v6, %a1
+  %v8 = mul nsw i32 %v7, 4
+  %v9 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 2
+  %v10 = load i32, i32* %v9, align 4
+  %v11 = mul nsw i32 %v10, %a2
+  %v12 = add nsw i32 %v11, %a1
+  %v13 = mul nsw i32 %v12, 2
+  %v14 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 14
+  %v15 = load i32, i32* %v14, align 4
+  %v16 = shl i32 1, %v15
+  %v17 = sub nsw i32 %a2, %v16
+  %v18 = mul nsw i32 %v17, %v1
+  %v19 = add nsw i32 %v18, %a1
+  %v20 = mul nsw i32 %v1, 2
+  %v21 = icmp eq i32 %v10, %v20
+  br i1 %v21, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  tail call void @f1(i8* getelementptr inbounds ([148 x i8], [148 x i8]* @g0, i32 0, i32 0), i8* getelementptr inbounds ([27 x i8], [27 x i8]* @g1, i32 0, i32 0)) #2
+  %v22 = load i32, i32* %v4, align 4
+  %v23 = load i32, i32* %v0, align 4
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v24 = phi i32 [ %v1, %b0 ], [ %v23, %b1 ]
+  %v25 = phi i32 [ %v5, %b0 ], [ %v22, %b1 ]
+  %v26 = mul nsw i32 %v24, 4
+  %v27 = icmp eq i32 %v25, %v26
+  br i1 %v27, label %b4, label %b3
+
+b3:                                               ; preds = %b2
+  tail call void @f1(i8* getelementptr inbounds ([148 x i8], [148 x i8]* @g2, i32 0, i32 0), i8* getelementptr inbounds ([27 x i8], [27 x i8]* @g1, i32 0, i32 0)) #2
+  br label %b4
+
+b4:                                               ; preds = %b3, %b2
+  %v28 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 4
+  store i32 %a1, i32* %v28, align 4
+  %v29 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 5
+  store i32 %a2, i32* %v29, align 4
+  %v30 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 6
+  store i32 %v3, i32* %v30, align 4
+  %v31 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 7
+  store i32 %v13, i32* %v31, align 4
+  %v32 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 8
+  store i32 %v8, i32* %v32, align 4
+  %v33 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 29
+  store i32 %v19, i32* %v33, align 4
+  %v34 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 21
+  store i32 0, i32* %v34, align 4
+  %v35 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 31, i32 3
+  %v36 = load i32, i32* %v35, align 4
+  %v37 = icmp slt i32 %v19, %v36
+  br i1 %v37, label %b6, label %b5
+
+b5:                                               ; preds = %b4
+  %v38 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 30
+  %v39 = load i8*, i8** %v38, align 4
+  %v40 = getelementptr inbounds i8, i8* %v39, i32 %v19
+  %v41 = load i8, i8* %v40, align 1
+  %v42 = sext i8 %v41 to i32
+  %v43 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 24
+  store i32 %v42, i32* %v43, align 4
+  store i32 2, i32* %v34, align 4
+  %v44 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 33
+  %v45 = load [7 x i8]*, [7 x i8]** %v44, align 4
+  %v46 = getelementptr inbounds [7 x i8], [7 x i8]* %v45, i32 %v19, i32 0
+  %v47 = load i8, i8* %v46, align 1
+  %v48 = sext i8 %v47 to i32
+  %v49 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 54, i32 0, i32 4
+  store i32 %v48, i32* %v49, align 4
+  %v50 = getelementptr inbounds [7 x i8], [7 x i8]* %v45, i32 %v19, i32 1
+  %v51 = load i8, i8* %v50, align 1
+  %v52 = sext i8 %v51 to i32
+  %v53 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 54, i32 0, i32 5
+  store i32 %v52, i32* %v53, align 4
+  %v54 = getelementptr inbounds [7 x i8], [7 x i8]* %v45, i32 %v19, i32 2
+  %v55 = load i8, i8* %v54, align 1
+  %v56 = sext i8 %v55 to i32
+  %v57 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 54, i32 0, i32 6
+  store i32 %v56, i32* %v57, align 4
+  %v58 = getelementptr inbounds [7 x i8], [7 x i8]* %v45, i32 %v19, i32 3
+  %v59 = load i8, i8* %v58, align 1
+  %v60 = sext i8 %v59 to i32
+  %v61 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 54, i32 0, i32 7
+  store i32 %v60, i32* %v61, align 4
+  br label %b7
+
+b6:                                               ; preds = %b4
+  %v62 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 24
+  store i32 -1, i32* %v62, align 4
+  %v63 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 43, i32 54, i32 0, i32 4
+  %v64 = bitcast i32* %v63 to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 4 %v64, i8 -1, i64 16, i1 false)
+  br label %b7
+
+b7:                                               ; preds = %b6, %b5
+  ret void
+}
+
+; Function Attrs: nounwind
+declare void @f1(i8*, i8*) #0
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { argmemonly nounwind }
+attributes #2 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/verify-undef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/verify-undef.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/verify-undef.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/verify-undef.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,35 @@
+; RUN: llc -march=hexagon -enable-pipeliner -verify-machineinstrs < %s
+; REQUIRES: asserts
+
+; This test fails in the machine verifier because the verifier thinks the
+; return register is undefined, and because there is a basic block that
+; ends with an unconditional branch that is not marked as a barrier.
+;
+; Enabling SWP exposes these bugs because the live variable analysis is
+; performed earlier than the ProcessImplicitDefs pass.  This ordering
+; causes the JMPR machine instruction to contain two R0 operands, one
+; with an undef flag and one with a kill flag.
+
+ at g0 = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0) #0 {
+b0:
+  %v0 = icmp eq i32 %a0, 0
+  br i1 %v0, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  %v1 = tail call i32 bitcast (i32 (...)* @f1 to i32 (i32)*)(i32 %a0) #0
+  br label %b3
+
+b2:                                               ; preds = %b0
+  store i32 0, i32* @g0, align 4
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  ret i32 undef
+}
+
+declare i32 @f1(...)
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/vmemu-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vmemu-128.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vmemu-128.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vmemu-128.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,25 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+
+; Test that unaligned vector loads are enabled for 128B HVX and that the
+; access is not broken into scalar memw loads.
+; CHECK-NOT: r{{[0-9]+}} = memw
+
+; Function Attrs: nounwind
+define void @f0(i8* noalias nocapture readonly %a0, i16* nocapture %a1) #0 {
+b0:
+  %v0 = bitcast i8* %a0 to <32 x i32>*
+  %v1 = load <32 x i32>, <32 x i32>* %v0, align 4, !tbaa !0
+  %v2 = tail call <32 x i32> @llvm.hexagon.V6.vrmpyub.128B(<32 x i32> %v1, i32 16843009)
+  %v3 = bitcast i16* %a1 to <32 x i32>*
+  store <32 x i32> %v2, <32 x i32>* %v3, align 128, !tbaa !0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vrmpyub.128B(<32 x i32>, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}
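
A hedged sketch of C source that yields an under-aligned 128-byte vector load
like the one above; the reduced-alignment HVX_UVector typedef is the usual
GCC/Clang idiom and, like the builtin name, is an assumption rather than part
of this commit.

  typedef int HVX_Vector  __attribute__((__vector_size__(128)));
  typedef int HVX_UVector __attribute__((__vector_size__(128), __aligned__(4)));

  /* The 4-byte-aligned load should stay a vector access (vmemu) instead
     of being broken into scalar memw loads. */
  void f0(const unsigned char *restrict src, short *restrict dst) {
    HVX_Vector v = *(const HVX_UVector *)src;
    *(HVX_Vector *)dst = __builtin_HEXAGON_V6_vrmpyub_128B(v, 0x01010101);
  }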

Added: llvm/trunk/test/CodeGen/Hexagon/vrcmpys.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vrcmpys.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vrcmpys.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vrcmpys.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,77 @@
+; RUN: llc -march=hexagon --filetype=obj < %s -o - | llvm-objdump -d - | FileCheck %s
+
+ at g0 = common global double 0.000000e+00, align 8
+ at g1 = common global double 0.000000e+00, align 8
+
+; CHECK-LABEL: f0:
+; CHECK: r{{[0-9]}}:{{[0-9]}} += vrcmpys(r{{[0-9]}}:{{[0-9]}},r{{[0-9]}}:{{[0-9]}}):<<1:sat:raw:lo
+define double @f0(i32 %a0, i32 %a1) {
+b0:
+  %v0 = load double, double* @g0, align 8, !tbaa !0
+  %v1 = fptosi double %v0 to i64
+  %v2 = load double, double* @g1, align 8, !tbaa !0
+  %v3 = fptosi double %v2 to i64
+  %v4 = tail call i64 @llvm.hexagon.M2.vrcmpys.acc.s1(i64 %v1, i64 %v3, i32 %a0)
+  %v5 = sitofp i64 %v4 to double
+  ret double %v5
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M2.vrcmpys.acc.s1(i64, i64, i32) #0
+
+; CHECK-LABEL: f1:
+; CHECK: r{{[0-9]}}:{{[0-9]}} += vrcmpys(r{{[0-9]}}:{{[0-9]}},r{{[0-9]}}:{{[0-9]}}):<<1:sat:raw:hi
+define double @f1(i32 %a0, i32 %a1) {
+b0:
+  %v0 = load double, double* @g0, align 8, !tbaa !0
+  %v1 = fptosi double %v0 to i64
+  %v2 = load double, double* @g1, align 8, !tbaa !0
+  %v3 = fptosi double %v2 to i64
+  %v4 = tail call i64 @llvm.hexagon.M2.vrcmpys.acc.s1(i64 %v1, i64 %v3, i32 %a1)
+  %v5 = sitofp i64 %v4 to double
+  ret double %v5
+}
+
+; CHECK-LABEL: f2:
+; CHECK: r{{[0-9]}}:{{[0-9]}} = vrcmpys(r{{[0-9]}}:{{[0-9]}},r{{[0-9]}}:{{[0-9]}}):<<1:sat:raw:lo
+define double @f2(i32 %a0, i32 %a1) {
+b0:
+  %v0 = load double, double* @g1, align 8, !tbaa !0
+  %v1 = fptosi double %v0 to i64
+  %v2 = tail call i64 @llvm.hexagon.M2.vrcmpys.s1(i64 %v1, i32 %a0)
+  %v3 = sitofp i64 %v2 to double
+  ret double %v3
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M2.vrcmpys.s1(i64, i32) #0
+
+; CHECK-LABEL: f3:
+; CHECK: r{{[0-9]}}:{{[0-9]}} = vrcmpys(r{{[0-9]}}:{{[0-9]}},r{{[0-9]}}:{{[0-9]}}):<<1:sat:raw:hi
+define double @f3(i32 %a0, i32 %a1) {
+b0:
+  %v0 = load double, double* @g1, align 8, !tbaa !0
+  %v1 = fptosi double %v0 to i64
+  %v2 = tail call i64 @llvm.hexagon.M2.vrcmpys.s1(i64 %v1, i32 %a1)
+  %v3 = sitofp i64 %v2 to double
+  ret double %v3
+}
+
+; CHECK-LABEL: f4:
+; CHECK: e9a4c2e0 { r0 = vrcmpys(r5:4,r3:2):<<1:rnd:sat:raw:lo }
+; CHECK: e9a4c2c0 { r0 = vrcmpys(r5:4,r3:2):<<1:rnd:sat:raw:hi }
+define void @f4() {
+b0:
+  call void asm sideeffect "r0=vrcmpys(r5:4,r2):<<1:rnd:sat", ""(), !srcloc !4
+  call void asm sideeffect "r0=vrcmpys(r5:4,r3):<<1:rnd:sat", ""(), !srcloc !5
+  ret void
+}
+
+attributes #0 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"double", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{i32 25}
+!5 = !{i32 71}
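
For f0-f3 above, a hedged C rendering (builtin names assumed to follow the
usual __builtin_HEXAGON_<intrinsic> convention) is shown below; which of the
:raw:lo and :raw:hi forms gets selected is pinned down by whether the IR
passes %a0 or %a1 as the 32-bit operand.

  long long mac(long long acc, long long x, int y) {
    /* accumulating form: "r#:# += vrcmpys(...):<<1:sat:raw:..." */
    return __builtin_HEXAGON_M2_vrcmpys_acc_s1(acc, x, y);
  }

  long long mul(long long x, int y) {
    /* non-accumulating form: "r#:# = vrcmpys(...):<<1:sat:raw:..." */
    return __builtin_HEXAGON_M2_vrcmpys_s1(x, y);
  }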

Added: llvm/trunk/test/CodeGen/Hexagon/vsplat-ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vsplat-ext.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vsplat-ext.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vsplat-ext.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,30 @@
+; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
+
+; Hexagon's vsplatb/vsplath only consider the lower 8/16 bits of the source
+; register, so extending the source (zxtb/zxth) is unnecessary.
+
+; CHECK-NOT: zxtb
+; CHECK-NOT: zxth
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind readnone
+define i64 @f0(i64 %a0) #0 {
+b0:
+  %v0 = trunc i64 %a0 to i32
+  %v1 = and i32 %v0, 65535
+  %v2 = tail call i64 @llvm.hexagon.S2.vsplatrh(i32 %v1)
+  %v3 = and i32 %v0, 255
+  %v4 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %v3)
+  %v5 = sext i32 %v4 to i64
+  %v6 = add nsw i64 %v5, %v2
+  ret i64 %v6
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.vsplatrh(i32) #0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.vsplatrb(i32) #0
+
+attributes #0 = { nounwind readnone }
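
A hedged C sketch of the pattern being checked (builtin names assumed): the
masks below correspond to the and/trunc operations in the IR, and they are
exactly what must not survive as zxtb/zxth in the generated code.

  long long f0(long long a0) {
    int v = (int)a0;
    /* vsplath reads only the low 16 bits and vsplatb only the low 8,
       so neither mask needs a separate zero-extend instruction. */
    long long h = __builtin_HEXAGON_S2_vsplatrh(v & 0xffff);
    int       b = __builtin_HEXAGON_S2_vsplatrb(v & 0xff);
    return h + b;
  }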

Added: llvm/trunk/test/CodeGen/Hexagon/wcsrtomb.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/wcsrtomb.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/wcsrtomb.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/wcsrtomb.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,183 @@
+; RUN: llc -march=hexagon -O2 < %s
+; REQUIRES: asserts
+
+target triple = "hexagon"
+
+%s.0 = type { i32, i16, i16 }
+
+; Function Attrs: nounwind
+define i32 @f0(i8* %a0, i32** nocapture %a1, i32 %a2, %s.0* %a3) #0 {
+b0:
+  %v0 = alloca [8 x i8], align 8
+  %v1 = load i32*, i32** %a1, align 4, !tbaa !0
+  %v2 = icmp eq %s.0* %a3, null
+  br i1 %v2, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  %v3 = call %s.0* bitcast (%s.0* (...)* @f1 to %s.0* ()*)() #1
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v4 = phi %s.0* [ %v3, %b1 ], [ %a3, %b0 ]
+  %v5 = icmp eq i8* %a0, null
+  br i1 %v5, label %b5, label %b3
+
+b3:                                               ; preds = %b2
+  %v6 = icmp eq i32 %a2, 0
+  br i1 %v6, label %b23, label %b4
+
+b4:                                               ; preds = %b3
+  %v7 = getelementptr inbounds [8 x i8], [8 x i8]* %v0, i32 0, i32 0
+  %v8 = getelementptr inbounds %s.0, %s.0* %v4, i32 0, i32 0
+  %v9 = getelementptr inbounds %s.0, %s.0* %v4, i32 0, i32 1
+  %v10 = getelementptr inbounds %s.0, %s.0* %v4, i32 0, i32 2
+  %v11 = bitcast i16* %v9 to i32*
+  br label %b11
+
+b5:                                               ; preds = %b2
+  %v12 = getelementptr inbounds [8 x i8], [8 x i8]* %v0, i32 0, i32 0
+  %v13 = load i32, i32* %v1, align 4, !tbaa !4
+  %v14 = call i32 @f2(i8* %v12, i32 %v13, %s.0* %v4) #1
+  %v15 = icmp slt i32 %v14, 0
+  br i1 %v15, label %b25, label %b6
+
+b6:                                               ; preds = %b5
+  br label %b7
+
+b7:                                               ; preds = %b10, %b6
+  %v16 = phi i32 [ %v29, %b10 ], [ %v14, %b6 ]
+  %v17 = phi i32 [ %v26, %b10 ], [ 0, %b6 ]
+  %v18 = phi i32* [ %v27, %b10 ], [ %v1, %b6 ]
+  %v19 = icmp sgt i32 %v16, 0
+  br i1 %v19, label %b8, label %b10
+
+b8:                                               ; preds = %b7
+  %v20 = add nsw i32 %v16, -1
+  %v21 = getelementptr inbounds [8 x i8], [8 x i8]* %v0, i32 0, i32 %v20
+  %v22 = load i8, i8* %v21, align 1, !tbaa !6
+  %v23 = icmp eq i8 %v22, 0
+  br i1 %v23, label %b9, label %b10
+
+b9:                                               ; preds = %b8
+  %v24 = add i32 %v17, -1
+  %v25 = add i32 %v24, %v16
+  br label %b25
+
+b10:                                              ; preds = %b8, %b7
+  %v26 = add i32 %v16, %v17
+  %v27 = getelementptr inbounds i32, i32* %v18, i32 1
+  %v28 = load i32, i32* %v27, align 4, !tbaa !4
+  %v29 = call i32 @f2(i8* %v12, i32 %v28, %s.0* %v4) #1
+  %v30 = icmp slt i32 %v29, 0
+  br i1 %v30, label %b24, label %b7
+
+b11:                                              ; preds = %b21, %b4
+  %v31 = phi i8* [ %a0, %b4 ], [ %v64, %b21 ]
+  %v32 = phi i32 [ %a2, %b4 ], [ %v65, %b21 ]
+  %v33 = phi i32 [ 0, %b4 ], [ %v62, %b21 ]
+  %v34 = phi i32* [ %v1, %b4 ], [ %v63, %b21 ]
+  %v35 = phi i32 [ undef, %b4 ], [ %v47, %b21 ]
+  %v36 = phi i16 [ undef, %b4 ], [ %v46, %b21 ]
+  %v37 = phi i16 [ undef, %b4 ], [ %v45, %b21 ]
+  %v38 = call i32 @f3() #1
+  %v39 = icmp ult i32 %v32, %v38
+  br i1 %v39, label %b12, label %b13
+
+b12:                                              ; preds = %b11
+  %v40 = load i32, i32* %v8, align 4
+  %v41 = load i32, i32* %v11, align 4
+  %v42 = trunc i32 %v41 to i16
+  %v43 = lshr i32 %v41, 16
+  %v44 = trunc i32 %v43 to i16
+  br label %b13
+
+b13:                                              ; preds = %b12, %b11
+  %v45 = phi i16 [ %v44, %b12 ], [ %v37, %b11 ]
+  %v46 = phi i16 [ %v42, %b12 ], [ %v36, %b11 ]
+  %v47 = phi i32 [ %v40, %b12 ], [ %v35, %b11 ]
+  %v48 = phi i8* [ %v7, %b12 ], [ %v31, %b11 ]
+  %v49 = load i32, i32* %v34, align 4, !tbaa !4
+  %v50 = call i32 @f2(i8* %v48, i32 %v49, %s.0* %v4) #1
+  %v51 = icmp slt i32 %v50, 0
+  br i1 %v51, label %b22, label %b14
+
+b14:                                              ; preds = %b13
+  %v52 = icmp eq i8* %v31, %v48
+  br i1 %v52, label %b18, label %b15
+
+b15:                                              ; preds = %b14
+  %v53 = icmp ult i32 %v32, %v50
+  br i1 %v53, label %b16, label %b17
+
+b16:                                              ; preds = %b15
+  store i32 %v47, i32* %v8, align 4
+  store i16 %v46, i16* %v9, align 4
+  store i16 %v45, i16* %v10, align 2
+  br label %b23
+
+b17:                                              ; preds = %b15
+  %v54 = call i8* @f4(i8* %v31, i8* %v7, i32 %v50) #1
+  br label %b18
+
+b18:                                              ; preds = %b17, %b14
+  %v55 = icmp sgt i32 %v50, 0
+  br i1 %v55, label %b19, label %b21
+
+b19:                                              ; preds = %b18
+  %v56 = add nsw i32 %v50, -1
+  %v57 = getelementptr inbounds i8, i8* %v31, i32 %v56
+  %v58 = load i8, i8* %v57, align 1, !tbaa !6
+  %v59 = icmp eq i8 %v58, 0
+  br i1 %v59, label %b20, label %b21
+
+b20:                                              ; preds = %b19
+  store i32* null, i32** %a1, align 4, !tbaa !0
+  %v60 = add i32 %v33, -1
+  %v61 = add i32 %v60, %v50
+  br label %b25
+
+b21:                                              ; preds = %b19, %b18
+  %v62 = add i32 %v50, %v33
+  %v63 = getelementptr inbounds i32, i32* %v34, i32 1
+  %v64 = getelementptr inbounds i8, i8* %v31, i32 %v50
+  %v65 = sub i32 %v32, %v50
+  %v66 = icmp eq i32 %v32, %v50
+  br i1 %v66, label %b22, label %b11
+
+b22:                                              ; preds = %b21, %b13
+  %v67 = phi i32* [ %v34, %b13 ], [ %v63, %b21 ]
+  %v68 = phi i32 [ -1, %b13 ], [ %v62, %b21 ]
+  br label %b23
+
+b23:                                              ; preds = %b22, %b16, %b3
+  %v69 = phi i32* [ %v34, %b16 ], [ %v1, %b3 ], [ %v67, %b22 ]
+  %v70 = phi i32 [ %v33, %b16 ], [ 0, %b3 ], [ %v68, %b22 ]
+  store i32* %v69, i32** %a1, align 4, !tbaa !0
+  br label %b25
+
+b24:                                              ; preds = %b10
+  br label %b25
+
+b25:                                              ; preds = %b24, %b23, %b20, %b9, %b5
+  %v71 = phi i32 [ %v25, %b9 ], [ %v70, %b23 ], [ %v61, %b20 ], [ -1, %b5 ], [ -1, %b24 ]
+  ret i32 %v71
+}
+
+declare %s.0* @f1(...)
+
+declare i32 @f2(i8*, i32, %s.0*)
+
+declare i32 @f3()
+
+declare i8* @f4(i8*, i8*, i32)
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"any pointer", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"int", !2}
+!6 = !{!2, !2, i64 0}

More information about the llvm-commits mailing list