[llvm] r327884 - [Hexagon] Add a few more lit tests

Krzysztof Parzyszek via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 19 14:12:06 PDT 2018


This should be fixed by r327907.
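
For reference, a single failing lit test like this one can be re-run locally from the build directory with llvm-lit; the paths below are illustrative and depend on your source/build layout:

  $ ./bin/llvm-lit -v <llvm-source-dir>/test/CodeGen/Hexagon/v6vec_inc1.ll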

-Krzysztof

On 3/19/2018 3:59 PM, Galina Kistanova wrote:
> Hello Krzysztof,
> 
> This commit added a broken test to a couple of our builders:
> 
> http://lab.llvm.org:8011/builders/clang-with-thin-lto-ubuntu/builds/9241
> http://lab.llvm.org:8011/builders/clang-with-thin-lto-ubuntu
> 
> . . .
> Failing Tests:
>     . . .
>      LLVM :: CodeGen/Hexagon/v6vec_inc1.ll
> 
> Could you please have a look?
> 
> The builder was already red and did not send notifications on this.
> 
> Thanks
> 
> Galina
> 
> On Mon, Mar 19, 2018 at 12:03 PM, Krzysztof Parzyszek via llvm-commits 
> <llvm-commits at lists.llvm.org> wrote:
> 
>     Author: kparzysz
>     Date: Mon Mar 19 12:03:18 2018
>     New Revision: 327884
> 
>     URL: http://llvm.org/viewvc/llvm-project?rev=327884&view=rev
>     Log:
>     [Hexagon] Add a few more lit tests
> 
>     Added:
>          llvm/trunk/test/CodeGen/Hexagon/S3_2op.ll
>          llvm/trunk/test/CodeGen/Hexagon/fp_latency.ll
>          llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_01.ll
>          llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_i16.ll
>          llvm/trunk/test/CodeGen/Hexagon/postinc-order.ll
>          llvm/trunk/test/CodeGen/Hexagon/swp-change-dep1.ll
>          llvm/trunk/test/CodeGen/Hexagon/swp-conv3x3-nested.ll
>          llvm/trunk/test/CodeGen/Hexagon/swp-dep-neg-offset.ll
>          llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi2.ll
>          llvm/trunk/test/CodeGen/Hexagon/swp-listen-loop3.ll
>          llvm/trunk/test/CodeGen/Hexagon/swp-max-stage3.ll
>          llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-6.ll
>          llvm/trunk/test/CodeGen/Hexagon/swp-sigma.ll
>          llvm/trunk/test/CodeGen/Hexagon/tfr-cleanup.ll
>          llvm/trunk/test/CodeGen/Hexagon/v6vec-vmemu1.ll
>          llvm/trunk/test/CodeGen/Hexagon/v6vec-vmemu2.ll
>          llvm/trunk/test/CodeGen/Hexagon/v6vec_inc1.ll
>     Modified:
>          llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_i16_01.ll
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/S3_2op.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/S3_2op.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/S3_2op.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/S3_2op.ll Mon Mar 19 12:03:18 2018
>     @@ -0,0 +1,364 @@
>     +; RUN: llc -march=hexagon -filetype=obj < %s -o - | llvm-objdump -d
>     - | FileCheck %s
>     +
>     +; CHECK-LABEL: f0:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = abs(r{{[1-9]}}:{{[0-9]}})
>     +define double @f0(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.A2.absp(i64 %v2)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.A2.absp(i64) #1
>     +
>     +; CHECK-LABEL: f1:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = neg(r{{[1-9]}}:{{[0-9]}})
>     +define double @f1(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.A2.negp(i64 %v2)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.A2.negp(i64) #1
>     +
>     +; CHECK-LABEL: f2:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = not(r{{[1-9]}}:{{[0-9]}})
>     +define double @f2(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.A2.notp(i64 %v2)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.A2.notp(i64) #1
>     +
>     +; CHECK-LABEL: f3:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = interleave(r{{[1-9]}}:{{[0-9]}})
>     +define double @f3(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.S2.interleave(i64 %v2)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.S2.interleave(i64) #1
>     +
>     +; CHECK-LABEL: f4:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = deinterleave(r{{[1-9]}}:{{[0-9]}})
>     +define double @f4(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.S2.deinterleave(i64 %v2)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.S2.deinterleave(i64) #1
>     +
>     +; CHECK-LABEL: f5:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = vconj(r{{[1-9]}}:{{[0-9]}}):sat
>     +define double @f5(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.A2.vconj(i64 %v2)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.A2.vconj(i64) #1
>     +
>     +; CHECK-LABEL: f6:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = vsathb(r{{[1-9]}}:{{[0-9]}})
>     +define double @f6(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.S2.vsathb.nopack(i64 %v2)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.S2.vsathb.nopack(i64) #1
>     +
>     +; CHECK-LABEL: f7:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = vsathub(r{{[1-9]}}:{{[0-9]}})
>     +define double @f7(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.S2.vsathub.nopack(i64 %v2)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.S2.vsathub.nopack(i64) #1
>     +
>     +; CHECK-LABEL: f8:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = vsatwh(r{{[1-9]}}:{{[0-9]}})
>     +define double @f8(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.S2.vsatwh.nopack(i64 %v2)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.S2.vsatwh.nopack(i64) #1
>     +
>     +; CHECK-LABEL: f9:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = vsatwuh(r{{[1-9]}}:{{[0-9]}})
>     +define double @f9(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.S2.vsatwuh.nopack(i64 %v2)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.S2.vsatwuh.nopack(i64) #1
>     +
>     +; CHECK-LABEL: f10:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = asr(r{{[1-9]}}:{{[0-9]}},#1)
>     +define double @f10(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.S2.asr.i.p(i64 %v2, i32 1)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.S2.asr.i.p(i64, i32) #1
>     +
>     +; CHECK-LABEL: f11:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = lsr(r{{[1-9]}}:{{[0-9]}},#1)
>     +define double @f11(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.S2.lsr.i.p(i64 %v2, i32 1)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.S2.lsr.i.p(i64, i32) #1
>     +
>     +; CHECK-LABEL: f12:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = asl(r{{[1-9]}}:{{[0-9]}},#1)
>     +define double @f12(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.S2.asl.i.p(i64 %v2, i32 1)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.S2.asl.i.p(i64, i32) #1
>     +
>     +; CHECK-LABEL: f13:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = vabsh(r{{[1-9]}}:{{[0-9]}})
>     +define double @f13(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.A2.vabsh(i64 %v2)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.A2.vabsh(i64) #1
>     +
>     +; CHECK-LABEL: f14:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = vabsh(r{{[1-9]}}:{{[0-9]}}):sat
>     +define double @f14(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.A2.vabshsat(i64 %v2)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.A2.vabshsat(i64) #1
>     +
>     +; CHECK-LABEL: f15:
>     +; CHECK: r{{[0-9]}}:{{[0-9]}} = vasrh(r{{[1-9]}}:{{[0-9]}},#1)
>     +define double @f15(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.S2.asr.i.vh(i64 %v2, i32 1)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.S2.asr.i.vh(i64, i32) #1
>     +
>     +; CHECK-LABEL: f16:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = vlsrh(r{{[1-9]}}:{{[0-9]}},#1)
>     +define double @f16(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.S2.lsr.i.vh(i64 %v2, i32 1)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.S2.lsr.i.vh(i64, i32) #1
>     +
>     +; CHECK-LABEL: f17:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = vaslh(r{{[1-9]}}:{{[0-9]}},#1)
>     +define double @f17(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.S2.asl.i.vh(i64 %v2, i32 1)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.S2.asl.i.vh(i64, i32) #1
>     +
>     +; CHECK-LABEL: f18:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = vabsw(r{{[1-9]}}:{{[0-9]}})
>     +define double @f18(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.A2.vabsw(i64 %v2)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.A2.vabsw(i64) #1
>     +
>     +; CHECK-LABEL: f19:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = vabsw(r{{[1-9]}}:{{[0-9]}}):sat
>     +define double @f19(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.A2.vabswsat(i64 %v2)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.A2.vabswsat(i64) #1
>     +
>     +; CHECK-LABEL: f20:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = vasrw(r{{[1-9]}}:{{[0-9]}},#1)
>     +define double @f20(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.S2.asr.i.vw(i64 %v2, i32 1)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.S2.asr.i.vw(i64, i32) #1
>     +
>     +; CHECK-LABEL: f21:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = vlsrw(r{{[1-9]}}:{{[0-9]}},#1)
>     +define double @f21(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.S2.lsr.i.vw(i64 %v2, i32 1)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.S2.lsr.i.vw(i64, i32) #1
>     +
>     +; CHECK-LABEL: f22:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = vaslw(r{{[1-9]}}:{{[0-9]}},#1)
>     +define double @f22(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.S2.asl.i.vw(i64 %v2, i32 1)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.S2.asl.i.vw(i64, i32) #1
>     +
>     +; CHECK-LABEL: f23:
>     +; CHECK: r{{[1-9]}}:{{[0-9]}} = brev(r{{[1-9]}}:{{[0-9]}})
>     +define double @f23(double %a0) #0 {
>     +b0:
>     +  %v0 = alloca double, align 8
>     +  store double %a0, double* %v0, align 8
>     +  %v1 = load double, double* %v0, align 8
>     +  %v2 = fptosi double %v1 to i64
>     +  %v3 = call i64 @llvm.hexagon.S2.brevp(i64 %v2)
>     +  %v4 = sitofp i64 %v3 to double
>     +  ret double %v4
>     +}
>     +
>     +declare i64 @llvm.hexagon.S2.brevp(i64) #1
>     +
>     +attributes #0 = { nounwind }
>     +attributes #1 = { nounwind readnone }
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/fp_latency.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/fp_latency.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/fp_latency.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/fp_latency.ll Mon Mar 19
>     12:03:18 2018
>     @@ -0,0 +1,88 @@
>     +; RUN: llc -O2 -march=hexagon -fp-contract=fast
>     -pipeliner-prune-loop-carried=false < %s | FileCheck %s
>     +
>     +; Test that there is 1 packet between the FP result and its use.
>     +
>     +; CHECK: loop0([[LOOP0:.LBB[0-9_]+]],
>     +; CHECK: [[LOOP0]]
>     +; CHECK: [[REG0:(r[0-9]+)]] += sfmpy(r{{[0-9]+}},r{{[0-9]+}})
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: r{{[0-9]+}} = {{.*}}[[REG0]]
>     +
>     +; Function Attrs: nounwind readnone
>     +define void @f0(i32 %a0, i32 %a1) #0 {
>     +b0:
>     +  %v0 = alloca [1000 x float], align 64
>     +  %v1 = alloca [1000 x float], align 64
>     +  %v2 = alloca [1000 x float], align 64
>     +  %v3 = alloca [1000 x float], align 64
>     +  %v4 = bitcast [1000 x float]* %v0 to i8*
>     +  call void @llvm.lifetime.start.p0i8(i64 4000, i8* %v4) #2
>     +  %v5 = bitcast [1000 x float]* %v1 to i8*
>     +  call void @llvm.lifetime.start.p0i8(i64 4000, i8* %v5) #2
>     +  %v6 = bitcast [1000 x float]* %v2 to i8*
>     +  call void @llvm.lifetime.start.p0i8(i64 4000, i8* %v6) #2
>     +  %v7 = bitcast [1000 x float]* %v3 to i8*
>     +  call void @llvm.lifetime.start.p0i8(i64 4000, i8* %v7) #2
>     +  %v8 = icmp sgt i32 %a1, 0
>     +  %v9 = add i32 %a1, -1
>     +  %v10 = getelementptr [1000 x float], [1000 x float]* %v3, i32 0,
>     i32 0
>     +  br label %b1
>     +
>     +b1:                                               ; preds = %b3, %b0
>     +  %v11 = phi i32 [ 0, %b0 ], [ %v34, %b3 ]
>     +  br i1 %v8, label %b2, label %b3
>     +
>     +b2:                                               ; preds = %b2, %b1
>     +  %v12 = phi float* [ %v33, %b2 ], [ %v10, %b1 ]
>     +  %v13 = phi i32 [ %v31, %b2 ], [ 0, %b1 ]
>     +  %v14 = mul nsw i32 %v13, %a1
>     +  %v15 = add nsw i32 %v14, %v11
>     +  %v16 = getelementptr inbounds [1000 x float], [1000 x float]*
>     %v1, i32 0, i32 %v15
>     +  %v17 = load float, float* %v16, align 4, !tbaa !0
>     +  %v18 = fmul float %v17, undef
>     +  %v19 = mul nsw i32 %v13, 25
>     +  %v20 = add nsw i32 %v19, %v11
>     +  %v21 = getelementptr inbounds [1000 x float], [1000 x float]*
>     %v2, i32 0, i32 %v20
>     +  %v22 = load float, float* %v21, align 4, !tbaa !0
>     +  %v23 = fmul float %v22, undef
>     +  %v24 = fadd float %v18, %v23
>     +  %v25 = load float, float* %v12, align 4, !tbaa !0
>     +  %v26 = fmul float %v25, undef
>     +  %v27 = fadd float %v24, %v26
>     +  %v28 = getelementptr inbounds [1000 x float], [1000 x float]*
>     %v0, i32 0, i32 %v20
>     +  %v29 = load float, float* %v28, align 4, !tbaa !0
>     +  %v30 = fadd float %v29, %v27
>     +  store float %v30, float* %v28, align 4, !tbaa !0
>     +  %v31 = add nuw nsw i32 %v13, 1
>     +  %v32 = icmp eq i32 %v13, %v9
>     +  %v33 = getelementptr float, float* %v12, i32 1
>     +  br i1 %v32, label %b3, label %b2
>     +
>     +b3:                                               ; preds = %b2, %b1
>     +  %v34 = add nuw nsw i32 %v11, 1
>     +  %v35 = icmp eq i32 %v34, 25
>     +  br i1 %v35, label %b4, label %b1
>     +
>     +b4:                                               ; preds = %b3
>     +  call void @llvm.lifetime.end.p0i8(i64 4000, i8* %v7) #2
>     +  call void @llvm.lifetime.end.p0i8(i64 4000, i8* %v6) #2
>     +  call void @llvm.lifetime.end.p0i8(i64 4000, i8* %v5) #2
>     +  call void @llvm.lifetime.end.p0i8(i64 4000, i8* %v4) #2
>     +  ret void
>     +}
>     +
>     +; Function Attrs: argmemonly nounwind
>     +declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
>     +
>     +; Function Attrs: argmemonly nounwind
>     +declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
>     +
>     +attributes #0 = { nounwind readnone "target-cpu"="hexagonv60"
>     "target-features"="+hvxv60,+hvx-length64b" }
>     +attributes #1 = { argmemonly nounwind }
>     +attributes #2 = { nounwind }
>     +
>     +!0 = !{!1, !1, i64 0}
>     +!1 = !{!"float", !2, i64 0}
>     +!2 = !{!"omnipotent char", !3, i64 0}
>     +!3 = !{!"Simple C/C++ TBAA"}
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_01.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_01.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_01.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_01.ll Mon Mar 19
>     12:03:18 2018
>     @@ -0,0 +1,44 @@
>     +; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
>     +; Test that we do generate max #u5 in memops.
>     +; CHECK: memb(r{{[0-9]+}}+#0) -= #31
>     +
>     + at g0 = unnamed_addr global i8 112, align 1
>     +
>     +; Function Attrs: norecurse nounwind
>     +define fastcc void @f0() unnamed_addr #0 {
>     +b0:
>     +  %v0 = load i8, i8* @g0, align 1, !tbaa !4
>     +  %v1 = zext i8 %v0 to i32
>     +  %v2 = mul nuw nsw i32 %v1, 9625
>     +  %v3 = and i32 %v2, 255
>     +  %v4 = mul nuw nsw i32 %v3, 9625
>     +  %v5 = and i32 %v4, 255
>     +  %v6 = trunc i32 %v5 to i8
>     +  store i8 %v6, i8* @g0, align 1, !tbaa !4
>     +  ret void
>     +}
>     +
>     +define i32 @f1() {
>     +b0:
>     +  %v0 = load i8, i8* @g0, align 1, !tbaa !4
>     +  %v1 = zext i8 %v0 to i32
>     +  %v2 = add nuw nsw i32 %v1, 225
>     +  %v3 = trunc i32 %v2 to i8
>     +  store i8 %v3, i8* @g0, align 1, !tbaa !4
>     +  tail call fastcc void @f0()
>     +  %v4 = load i8, i8* @g0, align 1, !tbaa !4
>     +  %v5 = zext i8 %v4 to i32
>     +  ret i32 %v5
>     +}
>     +
>     +attributes #0 = { norecurse nounwind "target-cpu"="hexagonv55" }
>     +
>     +!llvm.module.flags = !{!0, !2}
>     +
>     +!0 = !{i32 6, !"Target CPU", !1}
>     +!1 = !{!"hexagonv55"}
>     +!2 = !{i32 6, !"Target Features", !3}
>     +!3 = !{!"-hvx"}
>     +!4 = !{!5, !5, i64 0}
>     +!5 = !{!"omnipotent char", !6, i64 0}
>     +!6 = !{!"Simple C/C++ TBAA"}
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_i16.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_i16.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_i16.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_i16.ll Mon Mar 19
>     12:03:18 2018
>     @@ -0,0 +1,44 @@
>     +; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
>     +; Test that we do generate max #u5 in memops.
>     +; CHECK: memh(r{{[0-9]+}}+#0) -= #31
>     +
>     + at g0 = unnamed_addr global i16 -32, align 2
>     +
>     +; Function Attrs: norecurse nounwind
>     +define fastcc void @f0() unnamed_addr #0 {
>     +b0:
>     +  %v0 = load i16, i16* @g0, align 1, !tbaa !4
>     +  %v1 = zext i16 %v0 to i32
>     +  %v2 = mul nuw nsw i32 %v1, 9625
>     +  %v3 = and i32 %v2, 255
>     +  %v4 = mul nuw nsw i32 %v3, 9625
>     +  %v5 = and i32 %v4, 255
>     +  %v6 = trunc i32 %v5 to i16
>     +  store i16 %v6, i16* @g0, align 2, !tbaa !4
>     +  ret void
>     +}
>     +
>     +define i32 @f1() {
>     +b0:
>     +  %v0 = load i16, i16* @g0, align 2, !tbaa !4
>     +  %v1 = zext i16 %v0 to i32
>     +  %v2 = add nuw nsw i32 %v1, 65505
>     +  %v3 = trunc i32 %v2 to i16
>     +  store i16 %v3, i16* @g0, align 2, !tbaa !4
>     +  tail call fastcc void @f0()
>     +  %v4 = load i16, i16* @g0, align 2, !tbaa !4
>     +  %v5 = zext i16 %v4 to i32
>     +  ret i32 %v5
>     +}
>     +
>     +attributes #0 = { norecurse nounwind "target-cpu"="hexagonv55" }
>     +
>     +!llvm.module.flags = !{!0, !2}
>     +
>     +!0 = !{i32 6, !"Target CPU", !1}
>     +!1 = !{!"hexagonv55"}
>     +!2 = !{i32 6, !"Target Features", !3}
>     +!3 = !{!"-hvx"}
>     +!4 = !{!5, !5, i64 0}
>     +!5 = !{!"omnipotent char", !6, i64 0}
>     +!6 = !{!"Simple C/C++ TBAA"}
> 
>     Modified: llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_i16_01.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_i16_01.ll?rev=327884&r1=327883&r2=327884&view=diff
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_i16_01.ll (original)
>     +++ llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_i16_01.ll Mon Mar 19
>     12:03:18 2018
>     @@ -31,7 +31,7 @@ b0:
>         ret i32 %v5
>       }
> 
>     -attributes #0 = { norecurse nounwind }
>     +attributes #0 = { norecurse nounwind "target-cpu"="hexagonv55" }
> 
>       !llvm.module.flags = !{!0, !2}
> 
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/postinc-order.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/postinc-order.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/postinc-order.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/postinc-order.ll Mon Mar 19
>     12:03:18 2018
>     @@ -0,0 +1,68 @@
>     +; RUN: llc -march=hexagon < %s | FileCheck %s
>     +
>     +; Check that store is post-incremented.
>     +; CHECK: memd(r{{[0-9]+}}++#8) = r
>     +
>     +; Function Attrs: nounwind
>     +define void @f0(i32 %a0, i16* nocapture %a1, i16 signext %a2) #0 {
>     +b0:
>     +  %v0 = icmp eq i32 %a0, 0
>     +  br i1 %v0, label %b2, label %b3
>     +
>     +b1:                                               ; preds = %b10
>     +  br label %b2
>     +
>     +b2:                                               ; preds = %b7,
>     %b1, %b0
>     +  ret void
>     +
>     +b3:                                               ; preds = %b0
>     +  %v1 = icmp sgt i32 %a0, 3
>     +  br i1 %v1, label %b4, label %b7
>     +
>     +b4:                                               ; preds = %b3
>     +  %v2 = add i32 %a0, -1
>     +  %v3 = and i32 %v2, -4
>     +  %v4 = icmp sgt i32 %v3, 0
>     +  br i1 %v4, label %b5, label %b7
>     +
>     +b5:                                               ; preds = %b4
>     +  %v5 = insertelement <4 x i16> undef, i16 %a2, i32 0
>     +  %v6 = insertelement <4 x i16> %v5, i16 %a2, i32 1
>     +  %v7 = insertelement <4 x i16> %v6, i16 %a2, i32 2
>     +  %v8 = insertelement <4 x i16> %v7, i16 %a2, i32 3
>     +  br label %b9
>     +
>     +b6:                                               ; preds = %b9
>     +  br label %b7
>     +
>     +b7:                                               ; preds = %b6,
>     %b4, %b3
>     +  %v9 = phi i32 [ 0, %b3 ], [ %v3, %b4 ], [ %v3, %b6 ]
>     +  %v10 = icmp slt i32 %v9, %a0
>     +  br i1 %v10, label %b8, label %b2
>     +
>     +b8:                                               ; preds = %b7
>     +  br label %b10
>     +
>     +b9:                                               ; preds = %b9, %b5
>     +  %v11 = phi i32 [ 0, %b5 ], [ %v12, %b9 ]
>     +  %v12 = add nsw i32 %v11, 4
>     +  %v13 = getelementptr i16, i16* %a1, i32 %v11
>     +  %v14 = bitcast i16* %v13 to <4 x i16>*
>     +  %v15 = load <4 x i16>, <4 x i16>* %v14, align 16
>     +  %v16 = add <4 x i16> %v15, %v8
>     +  store <4 x i16> %v16, <4 x i16>* %v14, align 16
>     +  %v17 = icmp slt i32 %v12, %v3
>     +  br i1 %v17, label %b9, label %b6
>     +
>     +b10:                                              ; preds = %b10, %b8
>     +  %v18 = phi i32 [ %v19, %b10 ], [ %v9, %b8 ]
>     +  %v19 = add nsw i32 %v18, 1
>     +  %v20 = getelementptr i16, i16* %a1, i32 %v18
>     +  %v21 = load i16, i16* %v20, align 2
>     +  %v22 = add i16 %v21, %a2
>     +  store i16 %v22, i16* %v20, align 2
>     +  %v23 = icmp eq i32 %v19, %a0
>     +  br i1 %v23, label %b1, label %b10
>     +}
>     +
>     +attributes #0 = { nounwind "target-cpu"="hexagonv55" }
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/swp-change-dep1.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-change-dep1.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/swp-change-dep1.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/swp-change-dep1.ll Mon Mar 19
>     12:03:18 2018
>     @@ -0,0 +1,43 @@
>     +; RUN: llc -march=hexagon -enable-pipeliner -pipeliner-max-stages=1
>     < %s | FileCheck %s
>     +
>     +; Test that we update the offset correctly for loads that are
>     +; moved past stores. In these cases, we change the dependences
>     +; to make it easier to move the instructions, and we have to update
>     +; the register/offsets correctly after the schedule is finalized.
>     +
>     + at g0 = common global [400 x i32] zeroinitializer, align 8
>     + at g1 = common global [400 x i32] zeroinitializer, align 8
>     +
>     +; Function Attrs: nounwind
>     +define void @f0() #0 {
>     +b0:
>     +  br label %b2
>     +
>     +b1:                                               ; preds = %b2
>     +  ret void
>     +
>     +; CHECK: loop0(.LBB0_[[LOOP:.]],
>     +; CHECK: .LBB0_[[LOOP]]:
>     +; CHECK: = memd([[REG1:(r[0-9]+)]]+#8)
>     +; CHECK: memd([[REG1]]++#8) =
>     +; CHECK: }{{[ \t]*}}:endloop
>     +
>     +b2:                                               ; preds = %b2, %b0
>     +  %v0 = phi i32* [ getelementptr inbounds ([400 x i32], [400 x
>     i32]* @g0, i32 0, i32 0), %b0 ], [ %v11, %b2 ]
>     +  %v1 = phi i32* [ getelementptr inbounds ([400 x i32], [400 x
>     i32]* @g1, i32 0, i32 0), %b0 ], [ %v12, %b2 ]
>     +  %v2 = phi i32 [ 0, %b0 ], [ %v9, %b2 ]
>     +  %v3 = bitcast i32* %v0 to <2 x i32>*
>     +  %v4 = load <2 x i32>, <2 x i32>* %v3, align 8
>     +  %v5 = mul <2 x i32> %v4, <i32 7, i32 7>
>     +  %v6 = bitcast i32* %v1 to <2 x i32>*
>     +  %v7 = load <2 x i32>, <2 x i32>* %v6, align 8
>     +  %v8 = add <2 x i32> %v7, %v5
>     +  store <2 x i32> %v8, <2 x i32>* %v6, align 8
>     +  %v9 = add nsw i32 %v2, 2
>     +  %v10 = icmp slt i32 %v2, 398
>     +  %v11 = getelementptr i32, i32* %v0, i32 2
>     +  %v12 = getelementptr i32, i32* %v1, i32 2
>     +  br i1 %v10, label %b2, label %b1
>     +}
>     +
>     +attributes #0 = { nounwind "target-cpu"="hexagonv55" }
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/swp-conv3x3-nested.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-conv3x3-nested.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/swp-conv3x3-nested.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/swp-conv3x3-nested.ll Mon Mar 19
>     12:03:18 2018
>     @@ -0,0 +1,185 @@
>     +; RUN: llc -march=hexagon < %s | FileCheck %s
>     +
>     +; This version of the conv3x3 test has both loops. This test checks
>     that the
>     +; inner loop has 13 packets.
>     +
>     +; CHECK: loop0(.LBB0_[[LOOP:.]],
>     +; CHECK: .LBB0_[[LOOP]]:
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK-NOT: }
>     +; CHECK: }{{[ \t]*}}:endloop0
>     +
>     +declare <16 x i32> @llvm.hexagon.V6.vd0() #0
>     +declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x
>     i32>, i32) #0
>     +declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #0
>     +declare <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32>, i32, i32) #0
>     +declare <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32>, <32 x
>     i32>, i32, i32) #0
>     +declare <16 x i32> @llvm.hexagon.V6.vasrwhsat(<16 x i32>, <16 x
>     i32>, i32) #0
>     +declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #0
>     +declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #0
>     +declare <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>, <16 x i32>) #0
>     +declare <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>, <16 x
>     i32>, i32) #0
>     +
>     +define void @f0(i8* noalias nocapture readonly %a0, i32 %a1, i32
>     %a2, i32 %a3, i8* noalias nocapture readonly %a4, i32 %a5, i8*
>     noalias nocapture %a6) local_unnamed_addr #1 {
>     +b0:
>     +  %v0 = add nsw i32 %a3, -1
>     +  %v1 = icmp sgt i32 %a3, 2
>     +  br i1 %v1, label %b1, label %b6
>     +
>     +b1:                                               ; preds = %b0
>     +  %v2 = getelementptr inbounds i8, i8* %a6, i32 %a1
>     +  %v3 = getelementptr inbounds i8, i8* %a0, i32 %a1
>     +  %v4 = bitcast i8* %a4 to i32*
>     +  %v5 = load i32, i32* %v4, align 4, !tbaa !1, !alias.scope !5,
>     !noalias !8
>     +  %v6 = getelementptr inbounds i8, i8* %a4, i32 4
>     +  %v7 = bitcast i8* %v6 to i32*
>     +  %v8 = load i32, i32* %v7, align 4, !tbaa !1, !alias.scope !5,
>     !noalias !8
>     +  %v9 = getelementptr inbounds i8, i8* %a4, i32 8
>     +  %v10 = bitcast i8* %v9 to i32*
>     +  %v11 = load i32, i32* %v10, align 4, !tbaa !1, !alias.scope !5,
>     !noalias !8
>     +  %v12 = sub i32 0, %a1
>     +  %v13 = shl nsw i32 %a1, 1
>     +  %v14 = tail call <16 x i32> @llvm.hexagon.V6.vd0() #2
>     +  %v15 = icmp sgt i32 %a2, 0
>     +  br label %b2
>     +
>     +b2:                                               ; preds = %b5, %b1
>     +  %v16 = phi i8* [ %v2, %b1 ], [ %v102, %b5 ]
>     +  %v17 = phi i8* [ %v3, %b1 ], [ %v21, %b5 ]
>     +  %v18 = phi i32 [ 1, %b1 ], [ %v103, %b5 ]
>     +  %v19 = getelementptr inbounds i8, i8* %v17, i32 %v12
>     +  %v20 = getelementptr inbounds i8, i8* %v17, i32 %a1
>     +  %v21 = getelementptr inbounds i8, i8* %v17, i32 %v13
>     +  br i1 %v15, label %b3, label %b5
>     +
>     +b3:                                               ; preds = %b2
>     +  %v22 = bitcast i8* %v21 to <16 x i32>*
>     +  %v23 = load <16 x i32>, <16 x i32>* %v22, align 64, !tbaa !11,
>     !alias.scope !12, !noalias !13
>     +  %v24 = getelementptr inbounds i8, i8* %v21, i32 64
>     +  %v25 = bitcast i8* %v24 to <16 x i32>*
>     +  %v26 = bitcast i8* %v20 to <16 x i32>*
>     +  %v27 = load <16 x i32>, <16 x i32>* %v26, align 64, !tbaa !11,
>     !alias.scope !12, !noalias !13
>     +  %v28 = getelementptr inbounds i8, i8* %v20, i32 64
>     +  %v29 = bitcast i8* %v28 to <16 x i32>*
>     +  %v30 = bitcast i8* %v17 to <16 x i32>*
>     +  %v31 = load <16 x i32>, <16 x i32>* %v30, align 64, !tbaa !11,
>     !alias.scope !12, !noalias !13
>     +  %v32 = getelementptr inbounds i8, i8* %v17, i32 64
>     +  %v33 = bitcast i8* %v32 to <16 x i32>*
>     +  %v34 = bitcast i8* %v19 to <16 x i32>*
>     +  %v35 = load <16 x i32>, <16 x i32>* %v34, align 64, !tbaa !11,
>     !alias.scope !12, !noalias !13
>     +  %v36 = getelementptr inbounds i8, i8* %v19, i32 64
>     +  %v37 = bitcast i8* %v36 to <16 x i32>*
>     +  %v38 = getelementptr inbounds i8, i8* %v16, i32 %a1
>     +  %v39 = bitcast i8* %v38 to <16 x i32>*
>     +  %v40 = bitcast i8* %v16 to <16 x i32>*
>     +  br label %b4
>     +
>     +b4:                                               ; preds = %b4, %b3
>     +  %v41 = phi <16 x i32>* [ %v39, %b3 ], [ %v99, %b4 ]
>     +  %v42 = phi <16 x i32>* [ %v40, %b3 ], [ %v84, %b4 ]
>     +  %v43 = phi <16 x i32>* [ %v25, %b3 ], [ %v60, %b4 ]
>     +  %v44 = phi <16 x i32>* [ %v29, %b3 ], [ %v58, %b4 ]
>     +  %v45 = phi <16 x i32>* [ %v33, %b3 ], [ %v56, %b4 ]
>     +  %v46 = phi <16 x i32>* [ %v37, %b3 ], [ %v54, %b4 ]
>     +  %v47 = phi i32 [ %a2, %b3 ], [ %v100, %b4 ]
>     +  %v48 = phi <16 x i32> [ %v35, %b3 ], [ %v55, %b4 ]
>     +  %v49 = phi <16 x i32> [ %v31, %b3 ], [ %v57, %b4 ]
>     +  %v50 = phi <16 x i32> [ %v27, %b3 ], [ %v59, %b4 ]
>     +  %v51 = phi <16 x i32> [ %v23, %b3 ], [ %v61, %b4 ]
>     +  %v52 = phi <16 x i32> [ %v14, %b3 ], [ %v82, %b4 ]
>     +  %v53 = phi <16 x i32> [ %v14, %b3 ], [ %v97, %b4 ]
>     +  %v54 = getelementptr inbounds <16 x i32>, <16 x i32>* %v46, i32 1
>     +  %v55 = load <16 x i32>, <16 x i32>* %v46, align 64, !tbaa !11,
>     !alias.scope !12, !noalias !13
>     +  %v56 = getelementptr inbounds <16 x i32>, <16 x i32>* %v45, i32 1
>     +  %v57 = load <16 x i32>, <16 x i32>* %v45, align 64, !tbaa !11,
>     !alias.scope !12, !noalias !13
>     +  %v58 = getelementptr inbounds <16 x i32>, <16 x i32>* %v44, i32 1
>     +  %v59 = load <16 x i32>, <16 x i32>* %v44, align 64, !tbaa !11,
>     !alias.scope !12, !noalias !13
>     +  %v60 = getelementptr inbounds <16 x i32>, <16 x i32>* %v43, i32 1
>     +  %v61 = load <16 x i32>, <16 x i32>* %v43, align 64, !tbaa !11,
>     !alias.scope !12, !noalias !13
>     +  %v62 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>
>     %v55, <16 x i32> %v48, i32 4) #2
>     +  %v63 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>
>     %v57, <16 x i32> %v49, i32 4) #2
>     +  %v64 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>
>     %v59, <16 x i32> %v50, i32 4) #2
>     +  %v65 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>
>     %v61, <16 x i32> %v51, i32 4) #2
>     +  %v66 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>
>     %v62, <16 x i32> %v48) #2
>     +  %v67 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>
>     %v63, <16 x i32> %v49) #2
>     +  %v68 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>
>     %v64, <16 x i32> %v50) #2
>     +  %v69 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>
>     %v65, <16 x i32> %v51) #2
>     +  %v70 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32>
>     %v66, i32 %v5, i32 0) #2
>     +  %v71 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32>
>     %v66, i32 %v5, i32 1) #2
>     +  %v72 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x
>     i32> %v70, <32 x i32> %v67, i32 %v8, i32 0) #2
>     +  %v73 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x
>     i32> %v71, <32 x i32> %v67, i32 %v8, i32 1) #2
>     +  %v74 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x
>     i32> %v72, <32 x i32> %v68, i32 %v11, i32 0) #2
>     +  %v75 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x
>     i32> %v73, <32 x i32> %v68, i32 %v11, i32 1) #2
>     +  %v76 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v75) #2
>     +  %v77 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v75) #2
>     +  %v78 = tail call <16 x i32> @llvm.hexagon.V6.vasrwhsat(<16 x i32>
>     %v76, <16 x i32> %v77, i32 %a5) #2
>     +  %v79 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v74) #2
>     +  %v80 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v74) #2
>     +  %v81 = tail call <16 x i32> @llvm.hexagon.V6.vasrwhsat(<16 x i32>
>     %v79, <16 x i32> %v80, i32 %a5) #2
>     +  %v82 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>
>     %v78, <16 x i32> %v81) #2
>     +  %v83 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>
>     %v82, <16 x i32> %v52, i32 1) #2
>     +  %v84 = getelementptr inbounds <16 x i32>, <16 x i32>* %v42, i32 1
>     +  store <16 x i32> %v83, <16 x i32>* %v42, align 64, !tbaa !11,
>     !alias.scope !14, !noalias !15
>     +  %v85 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32>
>     %v67, i32 %v5, i32 0) #2
>     +  %v86 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32>
>     %v67, i32 %v5, i32 1) #2
>     +  %v87 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x
>     i32> %v85, <32 x i32> %v68, i32 %v8, i32 0) #2
>     +  %v88 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x
>     i32> %v86, <32 x i32> %v68, i32 %v8, i32 1) #2
>     +  %v89 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x
>     i32> %v87, <32 x i32> %v69, i32 %v11, i32 0) #2
>     +  %v90 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x
>     i32> %v88, <32 x i32> %v69, i32 %v11, i32 1) #2
>     +  %v91 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v90) #2
>     +  %v92 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v90) #2
>     +  %v93 = tail call <16 x i32> @llvm.hexagon.V6.vasrwhsat(<16 x i32>
>     %v91, <16 x i32> %v92, i32 %a5) #2
>     +  %v94 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v89) #2
>     +  %v95 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v89) #2
>     +  %v96 = tail call <16 x i32> @llvm.hexagon.V6.vasrwhsat(<16 x i32>
>     %v94, <16 x i32> %v95, i32 %a5) #2
>     +  %v97 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>
>     %v93, <16 x i32> %v96) #2
>     +  %v98 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>
>     %v97, <16 x i32> %v53, i32 1) #2
>     +  %v99 = getelementptr inbounds <16 x i32>, <16 x i32>* %v41, i32 1
>     +  store <16 x i32> %v98, <16 x i32>* %v41, align 64, !tbaa !11,
>     !alias.scope !14, !noalias !15
>     +  %v100 = add nsw i32 %v47, -64
>     +  %v101 = icmp sgt i32 %v47, 64
>     +  br i1 %v101, label %b4, label %b5
>     +
>     +b5:                                               ; preds = %b4, %b2
>     +  %v102 = getelementptr inbounds i8, i8* %v16, i32 %v13
>     +  %v103 = add nuw nsw i32 %v18, 2
>     +  %v104 = icmp slt i32 %v103, %v0
>     +  br i1 %v104, label %b2, label %b6
>     +
>     +b6:                                               ; preds = %b5, %b0
>     +  ret void
>     +}
>     +
>     +attributes #0 = { nounwind readnone }
>     +attributes #1 = { nounwind "target-cpu"="hexagonv62"
>     "target-features"="+hvx-length64b,+hvxv62" }
>     +attributes #2 = { nounwind }
>     +
>     +!llvm.module.flags = !{!0}
>     +
>     +!0 = !{i32 1, !"wchar_size", i32 4}
>     +!1 = !{!2, !2, i64 0}
>     +!2 = !{!"int", !3, i64 0}
>     +!3 = !{!"omnipotent char", !4, i64 0}
>     +!4 = !{!"Simple C/C++ TBAA"}
>     +!5 = !{!6}
>     +!6 = distinct !{!6, !7, !"x: %a"}
>     +!7 = distinct !{!7, !"x"}
>     +!8 = !{!9, !10}
>     +!9 = distinct !{!9, !7, !"x: %b"}
>     +!10 = distinct !{!10, !7, !"x: %c"}
>     +!11 = !{!3, !3, i64 0}
>     +!12 = !{!9}
>     +!13 = !{!6, !10}
>     +!14 = !{!10}
>     +!15 = !{!9, !6}
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/swp-dep-neg-offset.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-dep-neg-offset.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/swp-dep-neg-offset.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/swp-dep-neg-offset.ll Mon Mar 19
>     12:03:18 2018
>     @@ -0,0 +1,67 @@
>     +; RUN: llc -march=hexagon -enable-pipeliner < %s | FileCheck %s
>     +
>     +; Test that the code that changes the dependences does not allow
>     +; a load with a negative offset to be overlapped with the post
>     +; increment store that generates the base register.
>     +
>     +; CHECK: loop0(.LBB0_[[LOOP:.]],
>     +; CHECK: .LBB0_[[LOOP]]:
>     +; CHECK: = mem{{u?}}b([[REG:(r[0-9])+]]+#-1)
>     +; CHECK-NOT: memb([[REG]]{{\+?}}#0) =
>     +; CHECK: }
>     +; CHECK: }{{[ \t]*}}:endloop0
>     +
>     + at g0 = external global [1000000 x i8], align 8
>     +
>     +; Function Attrs: nounwind
>     +define void @f0(i32 %a0, [1000 x i8]* %a1, [1000 x i8]* %a2) #0 {
>     +b0:
>     +  br i1 undef, label %b1, label %b7
>     +
>     +b1:                                               ; preds = %b0
>     +  br i1 undef, label %b2, label %b6
>     +
>     +b2:                                               ; preds = %b5, %b1
>     +  br i1 undef, label %b3, label %b5
>     +
>     +b3:                                               ; preds = %b3, %b2
>     +  %v0 = phi i32 [ %v17, %b3 ], [ 1, %b2 ]
>     +  %v1 = phi i32 [ %v16, %b3 ], [ 0, %b2 ]
>     +  %v2 = add nsw i32 %v0, -1
>     +  %v3 = getelementptr inbounds [1000 x i8], [1000 x i8]* %a1, i32
>     undef, i32 %v2
>     +  %v4 = load i8, i8* %v3, align 1, !tbaa !0
>     +  %v5 = zext i8 %v4 to i32
>     +  %v6 = getelementptr inbounds [1000000 x i8], [1000000 x i8]* @g0,
>     i32 0, i32 %v1
>     +  %v7 = load i8, i8* %v6, align 1, !tbaa !0
>     +  %v8 = sext i8 %v7 to i32
>     +  %v9 = getelementptr inbounds [1000 x i8], [1000 x i8]* %a2, i32
>     undef, i32 %v0
>     +  %v10 = load i8, i8* %v9, align 1, !tbaa !0
>     +  %v11 = sext i8 %v10 to i32
>     +  %v12 = mul nsw i32 %v11, %v8
>     +  %v13 = add nsw i32 %v12, %v5
>     +  %v14 = trunc i32 %v13 to i8
>     +  %v15 = getelementptr inbounds [1000 x i8], [1000 x i8]* %a1, i32
>     undef, i32 %v0
>     +  store i8 %v14, i8* %v15, align 1, !tbaa !0
>     +  %v16 = add nsw i32 %v1, 1
>     +  %v17 = add nsw i32 %v0, 1
>     +  %v18 = icmp eq i32 %v17, %a0
>     +  br i1 %v18, label %b4, label %b3
>     +
>     +b4:                                               ; preds = %b3
>     +  br label %b5
>     +
>     +b5:                                               ; preds = %b4, %b2
>     +  br i1 undef, label %b6, label %b2
>     +
>     +b6:                                               ; preds = %b5, %b1
>     +  unreachable
>     +
>     +b7:                                               ; preds = %b0
>     +  ret void
>     +}
>     +
>     +attributes #0 = { nounwind "target-cpu"="hexagonv55" }
>     +
>     +!0 = !{!1, !1, i64 0}
>     +!1 = !{!"omnipotent char", !2, i64 0}
>     +!2 = !{!"Simple C/C++ TBAA"}
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi2.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi2.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi2.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi2.ll Mon Mar 19
>     12:03:18 2018
>     @@ -0,0 +1,65 @@
>     +; RUN: llc -march=hexagon -enable-pipeliner -pipeliner-max-stages=3
>     < %s | FileCheck %s
>     +
>     +%s.0 = type { i16, i8, i8, i16, i8, i8, i16, i8, i8, i8, i8, i8,
>     i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i16, i8, i8, %s.1, [2 x [16
>     x %s.2]], i32 (i8*, i8*, i8*, i8*, i8*)*, %s.3*, %s.3*, [120 x i8],
>     i8, i8, %s.4*, [2 x [120 x [8 x i8]]], [56 x i8], [2 x [121 x
>     %s.5]], [2 x %s.5], %s.5*, %s.5*, i32, i32, i16, i8, i8, %s.7, %s.9,
>     %s.11, %s.8*, %s.8* }
>     +%s.1 = type { i8, i8, i8, i8, i8, i8, i8, i8, i32, i8, [16 x i8],
>     i8, [4 x i8], [32 x i16], [32 x i16], [2 x i8], [4 x i8], [2 x [4 x
>     i8]], [2 x [4 x i8]], i32, i32, i16, i8 }
>     +%s.2 = type { [2 x i16] }
>     +%s.3 = type { i16*, i16*, i32, i32 }
>     +%s.4 = type { i8*, i8*, i8*, i32, i32, i32, i32 }
>     +%s.5 = type { %s.6, [2 x [4 x %s.2]], [2 x [2 x i8]], [2 x i8] }
>     +%s.6 = type { i8, i8, i8, i8, i8, i8, i8, i8, i32 }
>     +%s.7 = type { [12 x %s.8], [4 x %s.8], [2 x %s.8], [4 x %s.8], [6 x
>     %s.8], [2 x [7 x %s.8]], [4 x %s.8], [3 x [4 x %s.8]], [3 x %s.8],
>     [3 x %s.8] }
>     +%s.8 = type { i8, i8 }
>     +%s.9 = type { [371 x %s.8], [6 x %s.10] }
>     +%s.10 = type { %s.8*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
>     +%s.11 = type { i32, i32, i8* }
>     +
>     +; Function Attrs: nounwind
>     +define void @f0(%s.0* %a0) #0 {
>     +b0:
>     +  %v0 = load i8, i8* undef, align 1, !tbaa !0
>     +  %v1 = icmp eq i8 %v0, 1
>     +  br i1 %v1, label %b1, label %b2
>     +
>     +; CHECK: loop0(.LBB0_[[LOOP:.]],
>     +; CHECK: .LBB0_[[LOOP]]:
>     +; CHECK: memh([[REG0:(r[0-9]+)]]+#0) = #0
>     +; CHECK: }{{[ \t]*}}:endloop0
>     +
>     +b1:                                               ; preds = %b1, %b0
>     +  %v2 = phi i16* [ %v17, %b1 ], [ undef, %b0 ]
>     +  %v3 = phi i32 [ %v18, %b1 ], [ 0, %b0 ]
>     +  %v4 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 25, i32
>     10, i32 %v3
>     +  %v5 = load i8, i8* %v4, align 1, !tbaa !0
>     +  %v6 = zext i8 %v5 to i16
>     +  %v7 = add nsw i32 %v3, 1
>     +  %v8 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 25, i32
>     10, i32 %v7
>     +  %v9 = load i8, i8* %v8, align 1, !tbaa !0
>     +  %v10 = or i16 0, %v6
>     +  %v11 = load i8, i8* undef, align 1, !tbaa !0
>     +  %v12 = zext i8 %v11 to i16
>     +  %v13 = shl nuw i16 %v12, 8
>     +  %v14 = or i16 %v10, %v13
>     +  %v15 = or i16 %v14, 0
>     +  %v16 = getelementptr inbounds i16, i16* %v2, i32 1
>     +  store i16* %v16, i16** null, align 4, !tbaa !3
>     +  store i16 %v15, i16* %v2, align 2, !tbaa !5
>     +  %v17 = getelementptr inbounds i16, i16* %v2, i32 2
>     +  store i16* %v17, i16** null, align 4, !tbaa !3
>     +  store i16 0, i16* %v16, align 2, !tbaa !5
>     +  %v18 = add nsw i32 %v3, 8
>     +  %v19 = icmp slt i32 %v18, undef
>     +  br i1 %v19, label %b1, label %b2
>     +
>     +b2:                                               ; preds = %b1, %b0
>     +  ret void
>     +}
>     +
>     +attributes #0 = { nounwind "target-cpu"="hexagonv55" }
>     +
>     +!0 = !{!1, !1, i64 0}
>     +!1 = !{!"omnipotent char", !2}
>     +!2 = !{!"Simple C/C++ TBAA"}
>     +!3 = !{!4, !4, i64 0}
>     +!4 = !{!"any pointer", !1}
>     +!5 = !{!6, !6, i64 0}
>     +!6 = !{!"short", !1}
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/swp-listen-loop3.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-listen-loop3.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/swp-listen-loop3.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/swp-listen-loop3.ll Mon Mar 19
>     12:03:18 2018
>     @@ -0,0 +1,69 @@
>     +; RUN: llc -march=hexagon -pipeliner-ignore-recmii
>     -pipeliner-max-stages=2 -enable-pipeliner < %s | FileCheck %s
>     +
>     +; This is a loop we pipeline to three packets, though we could do
>     better.
>     +
>     +; CHECK: loop0(.LBB0_[[LOOP:.]],
>     +; CHECK: .LBB0_[[LOOP]]:
>     +; CHECK: {
>     +; CHECK: }
>     +; CHECK: {
>     +; CHECK: }
>     +; CHECK: {
>     +; CHECK: }{{[ \t]*}}:endloop0
>     +
>     +; Function Attrs: nounwind
>     +define void @f0(i32* nocapture %a0, i16 signext %a1) #0 {
>     +b0:
>     +  %v0 = sext i16 %a1 to i32
>     +  %v1 = add i32 %v0, -1
>     +  %v2 = icmp sgt i32 %v1, 0
>     +  br i1 %v2, label %b1, label %b4
>     +
>     +b1:                                               ; preds = %b0
>     +  %v3 = getelementptr i32, i32* %a0, i32 %v1
>     +  %v4 = load i32, i32* %v3, align 4
>     +  br label %b2
>     +
>     +b2:                                               ; preds = %b2, %b1
>     +  %v5 = phi i32 [ %v16, %b2 ], [ %v1, %b1 ]
>     +  %v6 = phi i32 [ %v5, %b2 ], [ %v0, %b1 ]
>     +  %v7 = phi i32 [ %v10, %b2 ], [ %v4, %b1 ]
>     +  %v8 = add nsw i32 %v6, -2
>     +  %v9 = getelementptr inbounds i32, i32* %a0, i32 %v8
>     +  %v10 = load i32, i32* %v9, align 4, !tbaa !0
>     +  %v11 = tail call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %v10, i32 7946)
>     +  %v12 = tail call i64 @llvm.hexagon.S2.asl.r.p(i64 %v11, i32 -13)
>     +  %v13 = getelementptr inbounds i32, i32* %a0, i32 %v5
>     +  %v14 = tail call i32 @llvm.hexagon.A2.sat(i64 %v12)
>     +  %v15 = tail call i32 @llvm.hexagon.A2.subsat(i32 %v7, i32 %v14)
>     +  store i32 %v15, i32* %v13, align 4, !tbaa !0
>     +  %v16 = add nsw i32 %v5, -1
>     +  %v17 = icmp sgt i32 %v16, 0
>     +  br i1 %v17, label %b2, label %b3
>     +
>     +b3:                                               ; preds = %b2
>     +  br label %b4
>     +
>     +b4:                                               ; preds = %b3, %b0
>     +  ret void
>     +}
>     +
>     +; Function Attrs: nounwind readnone
>     +declare i64 @llvm.hexagon.S2.asl.r.p(i64, i32) #1
>     +
>     +; Function Attrs: nounwind readnone
>     +declare i64 @llvm.hexagon.M2.dpmpyss.s0(i32, i32) #1
>     +
>     +; Function Attrs: nounwind readnone
>     +declare i32 @llvm.hexagon.A2.subsat(i32, i32) #1
>     +
>     +; Function Attrs: nounwind readnone
>     +declare i32 @llvm.hexagon.A2.sat(i64) #1
>     +
>     +attributes #0 = { nounwind "target-cpu"="hexagonv55" }
>     +attributes #1 = { nounwind readnone }
>     +
>     +!0 = !{!1, !1, i64 0}
>     +!1 = !{!"int", !2}
>     +!2 = !{!"omnipotent char", !3}
>     +!3 = !{!"Simple C/C++ TBAA"}
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/swp-max-stage3.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-max-stage3.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/swp-max-stage3.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/swp-max-stage3.ll Mon Mar 19
>     12:03:18 2018
>     @@ -0,0 +1,50 @@
>     +; RUN: llc -march=hexagon -O3 -fp-contract=fast
>     -pipeliner-max-stages=3 < %s
>     +; REQUIRES: asserts
>     +
>     +; Check Phis are generated correctly in epilogs after setting
>     -swp-max-stages=3
>     +
>     + at g0 = private unnamed_addr constant [6 x i8] c"s4116\00", align 1
>     +
>     +; Function Attrs: noinline nounwind
>     +define void @f0(i32 %a0, i32 %a1, float* nocapture readonly %a2,
>     [1000 x float]* nocapture readonly %a3, i32* nocapture readonly %a4,
>     i32 %a5) #0 {
>     +b0:
>     +  %v0 = tail call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #2
>     +  %v1 = sitofp i32 %v0 to double
>     +  %v2 = add nsw i32 %a1, -1
>     +  %v3 = icmp sgt i32 %a1, 1
>     +  br i1 %v3, label %b1, label %b3
>     +
>     +b1:                                               ; preds = %b1, %b0
>     +  %v4 = phi float [ %v13, %b1 ], [ 0.000000e+00, %b0 ]
>     +  %v5 = phi float* [ %v16, %b1 ], [ %a2, %b0 ]
>     +  %v6 = phi i32* [ %v17, %b1 ], [ %a4, %b0 ]
>     +  %v7 = phi i32 [ %v14, %b1 ], [ 0, %b0 ]
>     +  %v8 = load float, float* %v5, align 4
>     +  %v9 = load i32, i32* %v6, align 4
>     +  %v10 = getelementptr inbounds [1000 x float], [1000 x float]*
>     %a3, i32 %v9, i32 %a5
>     +  %v11 = load float, float* %v10, align 4
>     +  %v12 = fmul float %v8, %v11
>     +  %v13 = fadd float %v4, %v12
>     +  %v14 = add nuw nsw i32 %v7, 1
>     +  %v15 = icmp slt i32 %v14, %v2
>     +  %v16 = getelementptr float, float* %v5, i32 1
>     +  %v17 = getelementptr i32, i32* %v6, i32 1
>     +  br i1 %v15, label %b1, label %b2
>     +
>     +b2:                                               ; preds = %b1
>     +  %v18 = fpext float %v13 to double
>     +  br label %b3
>     +
>     +b3:                                               ; preds = %b2, %b0
>     +  %v19 = phi double [ %v18, %b2 ], [ 0.000000e+00, %b0 ]
>     +  tail call void @f2(double %v19, i32 %a1, i32 %a1, double %v1, i8*
>     getelementptr inbounds ([6 x i8], [6 x i8]* @g0, i32 0, i32 0)) #2
>     +  ret void
>     +}
>     +
>     +declare i32 @f1(...) #1
>     +
>     +declare void @f2(double, i32, i32, double, i8*) #1
>     +
>     +attributes #0 = { noinline nounwind "target-cpu"="hexagonv60" }
>     +attributes #1 = { "target-cpu"="hexagonv60" }
>     +attributes #2 = { nounwind }
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-6.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-6.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-6.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-6.ll Mon Mar 19
>     12:03:18 2018
>     @@ -0,0 +1,106 @@
>     +; RUN: llc -march=hexagon < %s | FileCheck %s
>     +
>     +; Test that the pipeliner generates correct code when attempting to reuse
>     +; an existing phi. This test case contains a phi that references another
>     +; phi (the value from the previous iteration), and a use that is scheduled
>     +; in a later iteration. When this occurs, the pipeliner was using a value
>     +; from the wrong iteration.
>     +
>     +; CHECK: loop0(.LBB0_[[LOOP:.]],
>     +; CHECK: .LBB0_[[LOOP]]:
>     +; CHECK: vlalign([[VREG1:v([0-9]+)]],[[VREG2:v([0-9]+)]],#2)
>     +; CHECK: [[VREG2]]:{{[0-9]+}} = vcombine([[VREG1]],v{{[0-9]+}})
>     +; CHECK: }{{[ \t]*}}:endloop0
>     +
>     +; Function Attrs: nounwind
>     +define void @f0(i32 %a0, i32 %a1) local_unnamed_addr #0 {
>     +b0:
>     +  %v0 = shl nsw i32 %a0, 1
>     +  %v1 = sub i32 0, %v0
>     +  %v2 = sub i32 0, %a0
>     +  %v3 = getelementptr inbounds i8, i8* undef, i32 %v1
>     +  %v4 = getelementptr inbounds i8, i8* undef, i32 %v2
>     +  %v5 = getelementptr inbounds i8, i8* undef, i32 %a0
>     +  %v6 = getelementptr inbounds i8, i8* undef, i32 %v0
>     +  %v7 = getelementptr inbounds i8, i8* %v6, i32 64
>     +  %v8 = bitcast i8* %v7 to <16 x i32>*
>     +  %v9 = getelementptr inbounds i8, i8* %v5, i32 64
>     +  %v10 = bitcast i8* %v9 to <16 x i32>*
>     +  %v11 = getelementptr inbounds i8, i8* undef, i32 64
>     +  %v12 = bitcast i8* %v11 to <16 x i32>*
>     +  %v13 = getelementptr inbounds i8, i8* %v4, i32 64
>     +  %v14 = bitcast i8* %v13 to <16 x i32>*
>     +  %v15 = getelementptr inbounds i8, i8* %v3, i32 64
>     +  %v16 = bitcast i8* %v15 to <16 x i32>*
>     +  br label %b1
>     +
>     +b1:                                               ; preds = %b1, %b0
>     +  %v17 = phi <16 x i32>* [ %v59, %b1 ], [ undef, %b0 ]
>     +  %v18 = phi <16 x i32>* [ %v34, %b1 ], [ %v8, %b0 ]
>     +  %v19 = phi <16 x i32>* [ %v32, %b1 ], [ %v10, %b0 ]
>     +  %v20 = phi <16 x i32>* [ %v30, %b1 ], [ %v12, %b0 ]
>     +  %v21 = phi <16 x i32>* [ %v28, %b1 ], [ %v14, %b0 ]
>     +  %v22 = phi <16 x i32>* [ %v26, %b1 ], [ %v16, %b0 ]
>     +  %v23 = phi <32 x i32> [ %v39, %b1 ], [ undef, %b0 ]
>     +  %v24 = phi <32 x i32> [ %v23, %b1 ], [ undef, %b0 ]
>     +  %v25 = phi i32 [ %v60, %b1 ], [ %a1, %b0 ]
>     +  %v26 = getelementptr inbounds <16 x i32>, <16 x i32>* %v22, i32 1
>     +  %v27 = load <16 x i32>, <16 x i32>* %v22, align 64
>     +  %v28 = getelementptr inbounds <16 x i32>, <16 x i32>* %v21, i32 1
>     +  %v29 = load <16 x i32>, <16 x i32>* %v21, align 64
>     +  %v30 = getelementptr inbounds <16 x i32>, <16 x i32>* %v20, i32 1
>     +  %v31 = load <16 x i32>, <16 x i32>* %v20, align 64
>     +  %v32 = getelementptr inbounds <16 x i32>, <16 x i32>* %v19, i32 1
>     +  %v33 = load <16 x i32>, <16 x i32>* %v19, align 64
>     +  %v34 = getelementptr inbounds <16 x i32>, <16 x i32>* %v18, i32 1
>     +  %v35 = load <16 x i32>, <16 x i32>* %v18, align 64
>     +  %v36 = tail call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32>
>     %v27, <16 x i32> %v35) #2
>     +  %v37 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x
>     i32> %v36, <16 x i32> %v31, i32 101058054) #2
>     +  %v38 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>
>     %v33, <16 x i32> %v29) #2
>     +  %v39 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x
>     i32> %v37, <32 x i32> %v38, i32 67372036) #2
>     +  %v40 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v23) #2
>     +  %v41 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v24) #2
>     +  %v42 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>
>     %v40, <16 x i32> %v41, i32 2) #2
>     +  %v43 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v23) #2
>     +  %v44 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v24) #2
>     +  %v45 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>
>     %v43, <16 x i32> %v44, i32 2) #2
>     +  %v46 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v39) #2
>     +  %v47 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>
>     %v46, <16 x i32> %v40, i32 2) #2
>     +  %v48 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v39) #2
>     +  %v49 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>
>     %v48, <16 x i32> %v43, i32 2) #2
>     +  %v50 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>
>     %v45, <16 x i32> %v43) #2
>     +  %v51 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>
>     %v40, <16 x i32> %v47) #2
>     +  %v52 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>
>     %v42, <16 x i32> %v47) #2
>     +  %v53 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x
>     i32> %v52, <16 x i32> %v40, i32 101058054) #2
>     +  %v54 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x
>     i32> %v53, <16 x i32> %v50, i32 67372036) #2
>     +  %v55 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>
>     %v45, <16 x i32> %v49) #2
>     +  %v56 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x
>     i32> %v55, <16 x i32> %v43, i32 101058054) #2
>     +  %v57 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x
>     i32> %v56, <16 x i32> %v51, i32 67372036) #2
>     +  %v58 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32>
>     %v57, <16 x i32> %v54) #2
>     +  %v59 = getelementptr inbounds <16 x i32>, <16 x i32>* %v17, i32 1
>     +  store <16 x i32> %v58, <16 x i32>* %v17, align 64
>     +  %v60 = add nsw i32 %v25, -64
>     +  %v61 = icmp sgt i32 %v25, 128
>     +  br i1 %v61, label %b1, label %b2
>     +
>     +b2:                                               ; preds = %b1
>     +  ret void
>     +}
>     +
>     +; Function Attrs: nounwind readnone
>     +declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
>     +
>     +declare <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32>, <32 x
>     i32>, i32) #1
>     +declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
>     +declare <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>, <16 x
>     i32>, i32) #1
>     +declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
>     +declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x
>     i32>, i32) #1
>     +declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #1
>     +declare <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32>, <16 x i32>) #1
>     +declare <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32>, <16 x
>     i32>, i32) #1
>     +declare <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32>, <16 x
>     i32>, i32) #1
>     +declare <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32>, <16 x i32>) #1
>     +
>     +attributes #0 = { nounwind "target-cpu"="hexagonv65"
>     "target-features"="+hvxv65,+hvx-length64b" }
>     +attributes #1 = { nounwind readnone }
>     +attributes #2 = { nounwind }
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/swp-sigma.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/swp-sigma.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/swp-sigma.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/swp-sigma.ll Mon Mar 19 12:03:18
>     2018
>     @@ -0,0 +1,203 @@
>     +; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
>     +
>     +; We do not pipeline sigma yet, but the non-pipelined version
>     +; with good scheduling is pretty fast: the compiler generates
>     +; 19 packets, while the assembly version uses 16.
>     +
>     +; CHECK:  loop0(.LBB0_[[LOOP:.]],
>     +; CHECK: .LBB0_[[LOOP]]:
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK: }
>     +; CHECK-NOT: }
>     +; CHECK: }{{[ \t]*}}:endloop
>     +
>     +@g0 = external constant [10 x i16], align 128
>     +
>     +declare i32 @llvm.hexagon.S2.vsplatrb(i32) #0
>     +declare <16 x i32> @llvm.hexagon.V6.vshuffh(<16 x i32>) #0
>     +declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #0
>     +declare <16 x i32> @llvm.hexagon.V6.vd0() #0
>     +declare <32 x i32> @llvm.hexagon.V6.vsububh(<16 x i32>, <16 x i32>) #0
>     +declare <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32>, <16 x
>     i32>) #0
>     +declare <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>, <16 x i32>) #0
>     +declare <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>, <16 x i32>,
>     <16 x i32>) #0
>     +declare <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1>, <16 x i32>,
>     <16 x i32>) #0
>     +declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #0
>     +declare <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32>, <32 x
>     i32>, i32) #0
>     +declare <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>, <16 x
>     i32>, i32) #0
>     +declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x
>     i32>, i32) #0
>     +declare <32 x i32> @llvm.hexagon.V6.vlutvwh(<16 x i32>, <16 x i32>,
>     i32) #0
>     +declare <16 x i32> @llvm.hexagon.V6.vmpyhvsrs(<16 x i32>, <16 x
>     i32>) #0
>     +declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #0
>     +declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #0
>     +declare <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>, <16 x i32>) #0
>     +
>     +define void @f0(i8* nocapture readonly %a0, i32 %a1, i32 %a2, i32
>     %a3, i8 zeroext %a4, i8* nocapture %a5) #1 {
>     +b0:
>     +  %v0 = add nsw i32 %a3, -1
>     +  %v1 = icmp sgt i32 %v0, 1
>     +  br i1 %v1, label %b1, label %b8
>     +
>     +b1:                                               ; preds = %b0
>     +  %v2 = mul i32 %a1, 2
>     +  %v3 = load <16 x i32>, <16 x i32>* bitcast ([10 x i16]* @g0 to
>     <16 x i32>*), align 128
>     +  %v4 = tail call <16 x i32> @llvm.hexagon.V6.vshuffh(<16 x i32>
>     %v3) #2
>     +  %v5 = zext i8 %a4 to i32
>     +  %v6 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %v5) #2
>     +  %v7 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v6) #2
>     +  %v8 = tail call <16 x i32> @llvm.hexagon.V6.vd0() #2
>     +  %v9 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 16843009) #2
>     +  %v10 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32
>     33686018) #2
>     +  %v11 = icmp sgt i32 %a2, 64
>     +  %v12 = add i32 %a1, 64
>     +  %v13 = add i32 %v12, %a1
>     +  %v14 = icmp sgt i32 %a2, 0
>     +  %v15 = add i32 %a3, -2
>     +  %v16 = bitcast i8* %a0 to <16 x i32>*
>     +  %v17 = load <16 x i32>, <16 x i32>* %v16, align 64
>     +  br label %b2
>     +
>     +b2:                                               ; preds = %b7, %b1
>     +  %v18 = phi <16 x i32> [ %v17, %b1 ], [ %v28, %b7 ]
>     +  %v19 = phi i8* [ %a0, %b1 ], [ %v23, %b7 ]
>     +  %v20 = phi i8* [ %a5, %b1 ], [ %v22, %b7 ]
>     +  %v21 = phi i32 [ 1, %b1 ], [ %v118, %b7 ]
>     +  %v22 = getelementptr inbounds i8, i8* %v20, i32 %a1
>     +  %v23 = getelementptr inbounds i8, i8* %v19, i32 %a1
>     +  %v24 = bitcast i8* %v23 to <16 x i32>*
>     +  %v25 = getelementptr inbounds i8, i8* %v19, i32 %v2
>     +  %v26 = bitcast i8* %v25 to <16 x i32>*
>     +  %v27 = bitcast i8* %v22 to <16 x i32>*
>     +  %v28 = load <16 x i32>, <16 x i32>* %v24, align 64
>     +  %v29 = load <16 x i32>, <16 x i32>* %v26, align 64
>     +  br i1 %v11, label %b3, label %b4
>     +
>     +b3:                                               ; preds = %b2
>     +  %v30 = getelementptr inbounds i8, i8* %v19, i32 64
>     +  %v31 = getelementptr inbounds i8, i8* %v19, i32 %v12
>     +  %v32 = bitcast i8* %v31 to <16 x i32>*
>     +  %v33 = getelementptr inbounds i8, i8* %v19, i32 %v13
>     +  %v34 = bitcast i8* %v33 to <16 x i32>*
>     +  br label %b5
>     +
>     +b4:                                               ; preds = %b2
>     +  br i1 %v14, label %b5, label %b7
>     +
>     +b5:                                               ; preds = %b4, %b3
>     +  %v35 = phi <16 x i32>* [ %v26, %b4 ], [ %v34, %b3 ]
>     +  %v36 = phi <16 x i32>* [ %v24, %b4 ], [ %v32, %b3 ]
>     +  %v37 = phi i8* [ %v19, %b4 ], [ %v30, %b3 ]
>     +  %v38 = bitcast i8* %v37 to <16 x i32>*
>     +  br label %b6
>     +
>     +b6:                                               ; preds = %b6, %b5
>     +  %v39 = phi <16 x i32>* [ %v108, %b6 ], [ %v27, %b5 ]
>     +  %v40 = phi <16 x i32>* [ %v115, %b6 ], [ %v35, %b5 ]
>     +  %v41 = phi <16 x i32>* [ %v114, %b6 ], [ %v36, %b5 ]
>     +  %v42 = phi <16 x i32>* [ %v113, %b6 ], [ %v38, %b5 ]
>     +  %v43 = phi i32 [ %v116, %b6 ], [ %a2, %b5 ]
>     +  %v44 = phi <16 x i32> [ %v45, %b6 ], [ %v8, %b5 ]
>     +  %v45 = phi <16 x i32> [ %v50, %b6 ], [ %v18, %b5 ]
>     +  %v46 = phi <16 x i32> [ %v47, %b6 ], [ %v8, %b5 ]
>     +  %v47 = phi <16 x i32> [ %v51, %b6 ], [ %v28, %b5 ]
>     +  %v48 = phi <16 x i32> [ %v49, %b6 ], [ %v8, %b5 ]
>     +  %v49 = phi <16 x i32> [ %v52, %b6 ], [ %v29, %b5 ]
>     +  %v50 = load <16 x i32>, <16 x i32>* %v42, align 64
>     +  %v51 = load <16 x i32>, <16 x i32>* %v41, align 64
>     +  %v52 = load <16 x i32>, <16 x i32>* %v40, align 64
>     +  %v53 = tail call <32 x i32> @llvm.hexagon.V6.vsububh(<16 x i32>
>     %v8, <16 x i32> %v47) #2
>     +  %v54 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x
>     i32> %v45, <16 x i32> %v47) #2
>     +  %v55 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x
>     i32> %v49, <16 x i32> %v47) #2
>     +  %v56 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>
>     %v54, <16 x i32> %v7) #2
>     +  %v57 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>
>     %v55, <16 x i32> %v7) #2
>     +  %v58 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>
>     %v56, <16 x i32> %v9, <16 x i32> %v10) #2
>     +  %v59 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1>
>     %v57, <16 x i32> %v58, <16 x i32> %v9) #2
>     +  %v60 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>
>     %v56, <16 x i32> %v8, <16 x i32> %v45) #2
>     +  %v61 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>
>     %v57, <16 x i32> %v8, <16 x i32> %v49) #2
>     +  %v62 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>
>     %v61, <16 x i32> %v60) #2
>     +  %v63 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x
>     i32> %v53, <32 x i32> %v62, i32 -1) #2
>     +  %v64 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>
>     %v45, <16 x i32> %v44, i32 1) #2
>     +  %v65 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>
>     %v49, <16 x i32> %v48, i32 1) #2
>     +  %v66 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x
>     i32> %v64, <16 x i32> %v47) #2
>     +  %v67 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x
>     i32> %v65, <16 x i32> %v47) #2
>     +  %v68 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>
>     %v66, <16 x i32> %v7) #2
>     +  %v69 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>
>     %v67, <16 x i32> %v7) #2
>     +  %v70 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1>
>     %v68, <16 x i32> %v59, <16 x i32> %v9) #2
>     +  %v71 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1>
>     %v69, <16 x i32> %v70, <16 x i32> %v9) #2
>     +  %v72 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>
>     %v68, <16 x i32> %v8, <16 x i32> %v64) #2
>     +  %v73 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>
>     %v69, <16 x i32> %v8, <16 x i32> %v65) #2
>     +  %v74 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>
>     %v73, <16 x i32> %v72) #2
>     +  %v75 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x
>     i32> %v63, <32 x i32> %v74, i32 -1) #2
>     +  %v76 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>
>     %v50, <16 x i32> %v45, i32 1) #2
>     +  %v77 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>
>     %v52, <16 x i32> %v49, i32 1) #2
>     +  %v78 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x
>     i32> %v76, <16 x i32> %v47) #2
>     +  %v79 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x
>     i32> %v77, <16 x i32> %v47) #2
>     +  %v80 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>
>     %v78, <16 x i32> %v7) #2
>     +  %v81 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>
>     %v79, <16 x i32> %v7) #2
>     +  %v82 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1>
>     %v80, <16 x i32> %v71, <16 x i32> %v9) #2
>     +  %v83 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1>
>     %v81, <16 x i32> %v82, <16 x i32> %v9) #2
>     +  %v84 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>
>     %v80, <16 x i32> %v8, <16 x i32> %v76) #2
>     +  %v85 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>
>     %v81, <16 x i32> %v8, <16 x i32> %v77) #2
>     +  %v86 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>
>     %v85, <16 x i32> %v84) #2
>     +  %v87 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x
>     i32> %v75, <32 x i32> %v86, i32 -1) #2
>     +  %v88 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>
>     %v47, <16 x i32> %v46, i32 1) #2
>     +  %v89 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>
>     %v51, <16 x i32> %v47, i32 1) #2
>     +  %v90 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x
>     i32> %v88, <16 x i32> %v47) #2
>     +  %v91 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x
>     i32> %v89, <16 x i32> %v47) #2
>     +  %v92 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>
>     %v90, <16 x i32> %v7) #2
>     +  %v93 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>
>     %v91, <16 x i32> %v7) #2
>     +  %v94 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1>
>     %v92, <16 x i32> %v83, <16 x i32> %v9) #2
>     +  %v95 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1>
>     %v93, <16 x i32> %v94, <16 x i32> %v9) #2
>     +  %v96 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>
>     %v92, <16 x i32> %v8, <16 x i32> %v88) #2
>     +  %v97 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>
>     %v93, <16 x i32> %v8, <16 x i32> %v89) #2
>     +  %v98 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>
>     %v97, <16 x i32> %v96) #2
>     +  %v99 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x
>     i32> %v87, <32 x i32> %v98, i32 -1) #2
>     +  %v100 = tail call <32 x i32> @llvm.hexagon.V6.vlutvwh(<16 x i32>
>     %v95, <16 x i32> %v4, i32 0) #2
>     +  %v101 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v99) #2
>     +  %v102 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v100) #2
>     +  %v103 = tail call <16 x i32> @llvm.hexagon.V6.vmpyhvsrs(<16 x
>     i32> %v101, <16 x i32> %v102) #2
>     +  %v104 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v99) #2
>     +  %v105 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v100) #2
>     +  %v106 = tail call <16 x i32> @llvm.hexagon.V6.vmpyhvsrs(<16 x
>     i32> %v104, <16 x i32> %v105) #2
>     +  %v107 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>
>     %v106, <16 x i32> %v103) #2
>     +  %v108 = getelementptr inbounds <16 x i32>, <16 x i32>* %v39, i32 1
>     +  store <16 x i32> %v107, <16 x i32>* %v39, align 64
>     +  %v109 = icmp sgt i32 %v43, 128
>     +  %v110 = getelementptr inbounds <16 x i32>, <16 x i32>* %v42, i32 1
>     +  %v111 = getelementptr inbounds <16 x i32>, <16 x i32>* %v41, i32 1
>     +  %v112 = getelementptr inbounds <16 x i32>, <16 x i32>* %v40, i32 1
>     +  %v113 = select i1 %v109, <16 x i32>* %v110, <16 x i32>* %v42
>     +  %v114 = select i1 %v109, <16 x i32>* %v111, <16 x i32>* %v41
>     +  %v115 = select i1 %v109, <16 x i32>* %v112, <16 x i32>* %v40
>     +  %v116 = add nsw i32 %v43, -64
>     +  %v117 = icmp sgt i32 %v43, 64
>     +  br i1 %v117, label %b6, label %b7
>     +
>     +b7:                                               ; preds = %b6, %b4
>     +  %v118 = add nuw nsw i32 %v21, 1
>     +  %v119 = icmp eq i32 %v21, %v15
>     +  br i1 %v119, label %b8, label %b2
>     +
>     +b8:                                               ; preds = %b7, %b0
>     +  ret void
>     +}
>     +
>     +attributes #0 = { nounwind readnone }
>     +attributes #1 = { nounwind "target-cpu"="hexagonv60"
>     "target-features"="+hvx,+hvx-length64b" }
>     +attributes #2 = { nounwind }
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/tfr-cleanup.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/tfr-cleanup.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/tfr-cleanup.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/tfr-cleanup.ll Mon Mar 19
>     12:03:18 2018
>     @@ -0,0 +1,83 @@
>     +; RUN: llc -march=hexagon -O3 -hexagon-eif=0 < %s | FileCheck %s
>     +; Without TFR cleanup, the last block contained
>     +; {
>     +;   r3 = xor(r1, r2)
>     +;   r1 = #0
>     +; }
>     +; {
>     +;   r7 = r1
>     +;   r0 = zxtb(r3)
>     +; }
>     +; After TFR cleanup, the copy "r7 = r1" should be simplified to "r7 = #0".
>     +; There shouldn't be any register copies in that block anymore.
>     +;
>     +; CHECK: LBB0_5:
>     +; CHECK-NOT: r{{[0-9]+}} = r{{[0-9]+}}
>     +
>     +target triple = "hexagon"
>     +
>     +; Function Attrs: nounwind readnone
>     +define i64 @f0(i64 %a0, i32 %a1, i32 %a2) #0 {
>     +b0:
>     +  %v0 = trunc i64 %a0 to i32
>     +  %v1 = lshr i64 %a0, 32
>     +  %v2 = trunc i64 %v1 to i32
>     +  %v3 = lshr i64 %a0, 40
>     +  %v4 = lshr i64 %a0, 48
>     +  %v5 = trunc i64 %v4 to i16
>     +  %v6 = icmp sgt i32 %a2, %a1
>     +  %v7 = lshr i32 %v0, 10
>     +  br i1 %v6, label %b1, label %b2
>     +
>     +b1:                                               ; preds = %b0
>     +  %v8 = add nsw i32 %v7, 4190971
>     +  %v9 = and i32 %v8, 4194303
>     +  %v10 = shl nuw nsw i64 %v3, 24
>     +  %v11 = trunc i64 %v10 to i32
>     +  %v12 = ashr exact i32 %v11, 24
>     +  %v13 = or i32 %v12, 102
>     +  br label %b3
>     +
>     +b2:                                               ; preds = %b0
>     +  %v14 = add nsw i32 %v7, 4189760
>     +  %v15 = trunc i64 %v3 to i32
>     +  %v16 = or i32 %v15, 119
>     +  br label %b3
>     +
>     +b3:                                               ; preds = %b2, %b1
>     +  %v17 = phi i32 [ %v13, %b1 ], [ %v16, %b2 ]
>     +  %v18 = phi i32 [ %v9, %b1 ], [ %v14, %b2 ]
>     +  %v19 = shl i32 %v18, 10
>     +  %v20 = icmp sgt i32 %a1, %a2
>     +  br i1 %v20, label %b4, label %b5
>     +
>     +b4:                                               ; preds = %b3
>     +  %v21 = and i32 %v2, 140
>     +  %v22 = or i32 %v21, 115
>     +  %v23 = and i16 %v5, 12345
>     +  br label %b6
>     +
>     +b5:                                               ; preds = %b3
>     +  %v24 = xor i32 %v2, 23
>     +  %v25 = or i16 %v5, 12345
>     +  br label %b6
>     +
>     +b6:                                               ; preds = %b5, %b4
>     +  %v26 = phi i16 [ %v23, %b4 ], [ %v25, %b5 ]
>     +  %v27 = phi i32 [ %v22, %b4 ], [ %v24, %b5 ]
>     +  %v28 = zext i16 %v26 to i64
>     +  %v29 = shl nuw i64 %v28, 48
>     +  %v30 = and i32 %v27, 255
>     +  %v31 = zext i32 %v30 to i64
>     +  %v32 = shl nuw nsw i64 %v31, 40
>     +  %v33 = and i32 %v17, 255
>     +  %v34 = zext i32 %v33 to i64
>     +  %v35 = shl nuw nsw i64 %v34, 32
>     +  %v36 = zext i32 %v19 to i64
>     +  %v37 = or i64 %v36, %v35
>     +  %v38 = or i64 %v37, %v29
>     +  %v39 = or i64 %v38, %v32
>     +  ret i64 %v39
>     +}
>     +
>     +attributes #0 = { nounwind readnone "target-cpu"="hexagonv55" }
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/v6vec-vmemu1.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vec-vmemu1.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/v6vec-vmemu1.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/v6vec-vmemu1.ll Mon Mar 19
>     12:03:18 2018
>     @@ -0,0 +1,67 @@
>     +; RUN: llc -march=hexagon -disable-hexagon-shuffle=1 -O2 -enable-pipeliner=false < %s | FileCheck %s
>     +
>     +; Generate vmemu (unaligned).
>     +; CHECK: vmem
>     +; CHECK: vmem
>     +; CHECK: vmemu
>     +; CHECK-NOT: vmem
>     +
>     +target triple = "hexagon"
>     +
>     +; Function Attrs: nounwind
>     +define void @f0(i16* nocapture readonly %a0, i32 %a1, i32 %a2, i16*
>     nocapture %a3) #0 {
>     +b0:
>     +  %v0 = mul i32 %a2, -2
>     +  %v1 = add i32 %v0, 64
>     +  %v2 = tail call <16 x i32> @llvm.hexagon.V6.vsubw(<16 x i32>
>     undef, <16 x i32> undef)
>     +  %v3 = bitcast i16* %a3 to <16 x i32>*
>     +  %v4 = sdiv i32 %a1, 32
>     +  %v5 = icmp sgt i32 %a1, 31
>     +  br i1 %v5, label %b1, label %b4
>     +
>     +b1:                                               ; preds = %b0
>     +  %v6 = bitcast i16* %a0 to <16 x i32>*
>     +  %v7 = icmp sgt i32 %a1, 63
>     +  %v8 = mul i32 %v4, 32
>     +  %v9 = select i1 %v7, i32 %v8, i32 32
>     +  %v10 = getelementptr i16, i16* %a3, i32 %v9
>     +  br label %b2
>     +
>     +b2:                                               ; preds = %b2, %b1
>     +  %v11 = phi i32 [ 0, %b1 ], [ %v19, %b2 ]
>     +  %v12 = phi <16 x i32> [ %v2, %b1 ], [ %v16, %b2 ]
>     +  %v13 = phi <16 x i32>* [ %v3, %b1 ], [ %v18, %b2 ]
>     +  %v14 = phi <16 x i32>* [ %v6, %b1 ], [ %v15, %b2 ]
>     +  %v15 = getelementptr inbounds <16 x i32>, <16 x i32>* %v14, i32 1
>     +  %v16 = load <16 x i32>, <16 x i32>* %v14, align 4, !tbaa !0
>     +  %v17 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>
>     %v16, <16 x i32> %v12, i32 %v1)
>     +  %v18 = getelementptr inbounds <16 x i32>, <16 x i32>* %v13, i32 1
>     +  store <16 x i32> %v17, <16 x i32>* %v13, align 4, !tbaa !0
>     +  %v19 = add nsw i32 %v11, 1
>     +  %v20 = icmp slt i32 %v19, %v4
>     +  br i1 %v20, label %b2, label %b3
>     +
>     +b3:                                               ; preds = %b2
>     +  %v21 = bitcast i16* %v10 to <16 x i32>*
>     +  br label %b4
>     +
>     +b4:                                               ; preds = %b3, %b0
>     +  %v22 = phi <16 x i32> [ %v16, %b3 ], [ %v2, %b0 ]
>     +  %v23 = phi <16 x i32>* [ %v21, %b3 ], [ %v3, %b0 ]
>     +  %v24 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>
>     %v2, <16 x i32> %v22, i32 %v1)
>     +  store <16 x i32> %v24, <16 x i32>* %v23, align 4, !tbaa !0
>     +  ret void
>     +}
>     +
>     +; Function Attrs: nounwind readnone
>     +declare <16 x i32> @llvm.hexagon.V6.vsubw(<16 x i32>, <16 x i32>) #1
>     +
>     +; Function Attrs: nounwind readnone
>     +declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>,
>     i32) #1
>     +
>     +attributes #0 = { nounwind "target-cpu"="hexagonv60"
>     "target-features"="+hvxv60,+hvx-length64b" }
>     +attributes #1 = { nounwind readnone }
>     +
>     +!0 = !{!1, !1, i64 0}
>     +!1 = !{!"omnipotent char", !2, i64 0}
>     +!2 = !{!"Simple C/C++ TBAA"}
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/v6vec-vmemu2.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vec-vmemu2.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/v6vec-vmemu2.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/v6vec-vmemu2.ll Mon Mar 19
>     12:03:18 2018
>     @@ -0,0 +1,32 @@
>     +; RUN: llc -march=hexagon -disable-hexagon-shuffle=0 -O2 < %s |
>     FileCheck %s
>     +
>     +; Generate vmemu (unaligned).
>     +; CHECK: vmemu
>     +; CHECK: vmemu
>     +; CHECK: vmemu
>     +; CHECK-NOT: vmem
>     +
>     +target triple = "hexagon"
>     +
>     +; Function Attrs: nounwind
>     +define void @f0(i8* nocapture readonly %a0, i8* nocapture readonly
>     %a1, i8* nocapture %a2) #0 {
>     +b0:
>     +  %v0 = bitcast i8* %a0 to <16 x i32>*
>     +  %v1 = load <16 x i32>, <16 x i32>* %v0, align 4, !tbaa !0
>     +  %v2 = bitcast i8* %a1 to <16 x i32>*
>     +  %v3 = load <16 x i32>, <16 x i32>* %v2, align 4, !tbaa !0
>     +  %v4 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v1,
>     <16 x i32> %v3)
>     +  %v5 = bitcast i8* %a2 to <16 x i32>*
>     +  store <16 x i32> %v4, <16 x i32>* %v5, align 4, !tbaa !0
>     +  ret void
>     +}
>     +
>     +; Function Attrs: nounwind readnone
>     +declare <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32>, <16 x i32>) #1
>     +
>     +attributes #0 = { nounwind "target-cpu"="hexagonv60"
>     "target-features"="+hvxv60,+hvx-length64b" }
>     +attributes #1 = { nounwind readnone }
>     +
>     +!0 = !{!1, !1, i64 0}
>     +!1 = !{!"omnipotent char", !2, i64 0}
>     +!2 = !{!"Simple C/C++ TBAA"}
> 
>     Added: llvm/trunk/test/CodeGen/Hexagon/v6vec_inc1.ll
>     URL:
>     http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/v6vec_inc1.ll?rev=327884&view=auto
>     ==============================================================================
>     --- llvm/trunk/test/CodeGen/Hexagon/v6vec_inc1.ll (added)
>     +++ llvm/trunk/test/CodeGen/Hexagon/v6vec_inc1.ll Mon Mar 19
>     12:03:18 2018
>     @@ -0,0 +1,73 @@
>     +; RUN: llc -march=hexagon -O2 -enable-pipeliner=false < %s | FileCheck %s
>     +; RUN: llc -march=hexagon -O2 -debug-only=pipeliner < %s -o - 2>&1 | FileCheck %s --check-prefix=CHECK-SWP
>     +
>     +; CHECK: {
>     +; CHECK-DAG: v{{[0-9]*}} = vmem(r{{[0-9]*}}++#1)
>     +; CHECK-DAG: vmem(r{{[0-9]*}}++#1) = v{{[0-9]*}}.new
>     +; CHECK: }{{[ \t]*}}:endloop0
>     +
>     +; CHECK-SWP: Schedule Found? 1
>     +; CHECK-SWP: {
>     +; CHECK-SWP-DAG: v{{[0-9]*}}.cur = vmem(r{{[0-9]*}}++#1)
>     +; CHECK-SWP-DAG: vmem(r{{[0-9]*}}++#1) = v{{[0-9]*}}.new
>     +; CHECK-SWP: }{{[ \t]*}}:endloop0
>     +
>     +target triple = "hexagon"
>     +
>     +; Function Attrs: nounwind
>     +define void @f0(i16* nocapture readonly %a0, i32 %a1, i32 %a2, i16*
>     nocapture %a3) #0 {
>     +b0:
>     +  %v0 = mul i32 %a2, -2
>     +  %v1 = add i32 %v0, 64
>     +  %v2 = tail call <16 x i32> @llvm.hexagon.V6.vsubw(<16 x i32>
>     undef, <16 x i32> undef)
>     +  %v3 = bitcast i16* %a3 to <16 x i32>*
>     +  %v4 = sdiv i32 %a1, 32
>     +  %v5 = icmp sgt i32 %a1, 31
>     +  br i1 %v5, label %b1, label %b4
>     +
>     +b1:                                               ; preds = %b0
>     +  %v6 = bitcast i16* %a0 to <16 x i32>*
>     +  %v7 = icmp sgt i32 %a1, 63
>     +  %v8 = mul i32 %v4, 32
>     +  %v9 = select i1 %v7, i32 %v8, i32 32
>     +  %v10 = getelementptr i16, i16* %a3, i32 %v9
>     +  br label %b2
>     +
>     +b2:                                               ; preds = %b2, %b1
>     +  %v11 = phi i32 [ 0, %b1 ], [ %v19, %b2 ]
>     +  %v12 = phi <16 x i32> [ %v2, %b1 ], [ %v16, %b2 ]
>     +  %v13 = phi <16 x i32>* [ %v3, %b1 ], [ %v18, %b2 ]
>     +  %v14 = phi <16 x i32>* [ %v6, %b1 ], [ %v15, %b2 ]
>     +  %v15 = getelementptr inbounds <16 x i32>, <16 x i32>* %v14, i32 1
>     +  %v16 = load <16 x i32>, <16 x i32>* %v14, align 64, !tbaa !0
>     +  %v17 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>
>     %v16, <16 x i32> %v12, i32 %v1)
>     +  %v18 = getelementptr inbounds <16 x i32>, <16 x i32>* %v13, i32 1
>     +  store <16 x i32> %v17, <16 x i32>* %v13, align 64, !tbaa !0
>     +  %v19 = add nsw i32 %v11, 1
>     +  %v20 = icmp slt i32 %v19, %v4
>     +  br i1 %v20, label %b2, label %b3
>     +
>     +b3:                                               ; preds = %b2
>     +  %v21 = bitcast i16* %v10 to <16 x i32>*
>     +  br label %b4
>     +
>     +b4:                                               ; preds = %b3, %b0
>     +  %v22 = phi <16 x i32> [ %v16, %b3 ], [ %v2, %b0 ]
>     +  %v23 = phi <16 x i32>* [ %v21, %b3 ], [ %v3, %b0 ]
>     +  %v24 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>
>     %v2, <16 x i32> %v22, i32 %v1)
>     +  store <16 x i32> %v24, <16 x i32>* %v23, align 64, !tbaa !0
>     +  ret void
>     +}
>     +
>     +; Function Attrs: nounwind readnone
>     +declare <16 x i32> @llvm.hexagon.V6.vsubw(<16 x i32>, <16 x i32>) #1
>     +
>     +; Function Attrs: nounwind readnone
>     +declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>,
>     i32) #1
>     +
>     +attributes #0 = { nounwind "target-cpu"="hexagonv60"
>     "target-features"="+hvxv60,+hvx-length64b" }
>     +attributes #1 = { nounwind readnone }
>     +
>     +!0 = !{!1, !1, i64 0}
>     +!1 = !{!"omnipotent char", !2, i64 0}
>     +!2 = !{!"Simple C/C++ TBAA"}
> 
> 
> 
> 
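
In case it helps anyone reproduce locally: each of the new tests can be run
individually through lit from a build directory. A minimal sketch (the paths
are assumptions about the local checkout/build layout, not part of the patch):

  $ ./bin/llvm-lit -v ../llvm/test/CodeGen/Hexagon/v6vec_inc1.ll

or, equivalently, by expanding the test's first RUN line by hand:

  $ ./bin/llc -march=hexagon -O2 -enable-pipeliner=false \
        < ../llvm/test/CodeGen/Hexagon/v6vec_inc1.ll \
        | ./bin/FileCheck ../llvm/test/CodeGen/Hexagon/v6vec_inc1.ll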

-- 
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, 
hosted by The Linux Foundation

