[llvm] ab9521a - [Hexagon] Use 'vnot' instead of 'not' in patterns with vectors

Krzysztof Parzyszek via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 22 13:36:54 PDT 2021


Author: Krzysztof Parzyszek
Date: 2021-04-22T15:36:20-05:00
New Revision: ab9521aaebc51de677741343184da768ee596c08

URL: https://github.com/llvm/llvm-project/commit/ab9521aaebc51de677741343184da768ee596c08
DIFF: https://github.com/llvm/llvm-project/commit/ab9521aaebc51de677741343184da768ee596c08.diff

LOG: [Hexagon] Use 'vnot' instead of 'not' in patterns with vectors

'not' expands to checking for an xor with a -1 constant. Since
this looks for a ConstantSDNode it will never match for a vector.

Co-authored-by: Craig Topper <craig.topper at sifive.com>

Differential Revision: https://reviews.llvm.org/D100687

Added: 
    llvm/test/CodeGen/Hexagon/autohvx/logical-128b.ll
    llvm/test/CodeGen/Hexagon/autohvx/logical-64b.ll
    llvm/test/CodeGen/Hexagon/isel/logical.ll

Modified: 
    llvm/lib/Target/Hexagon/HexagonPatterns.td
    llvm/lib/Target/Hexagon/HexagonPatternsHVX.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/Hexagon/HexagonPatterns.td b/llvm/lib/Target/Hexagon/HexagonPatterns.td
index e0aa9a21548d..f4223b74c900 100644
--- a/llvm/lib/Target/Hexagon/HexagonPatterns.td
+++ b/llvm/lib/Target/Hexagon/HexagonPatterns.td
@@ -293,6 +293,8 @@ class pf2<SDNode Op> : PatFrag<(ops node:$a, node:$b), (Op node:$a, node:$b)>;
 
 class Not2<PatFrag P>
   : PatFrag<(ops node:$A, node:$B), (P node:$A, (not node:$B))>;
+class VNot2<PatFrag P, PatFrag Not>
+  : PatFrag<(ops node:$A, node:$B), (P node:$A, (Not node:$B))>;
 
 // If there is a constant operand that feeds the and/or instruction,
 // do not generate the compound instructions.
@@ -564,37 +566,50 @@ def: Pat<(pnot V4I1:$Ps),   (C2_not V4I1:$Ps)>;
 def: Pat<(pnot V8I1:$Ps),   (C2_not V8I1:$Ps)>;
 def: Pat<(add I1:$Ps, -1),  (C2_not I1:$Ps)>;
 
-multiclass BoolOpR_RR_pat<InstHexagon MI, PatFrag Op> {
-  def: OpR_RR_pat<MI, Op,   i1,   I1>;
-  def: OpR_RR_pat<MI, Op, v2i1, V2I1>;
-  def: OpR_RR_pat<MI, Op, v4i1, V4I1>;
-  def: OpR_RR_pat<MI, Op, v8i1, V8I1>;
+def: OpR_RR_pat<C2_and,         And, i1, I1>;
+def: OpR_RR_pat<C2_or,           Or, i1, I1>;
+def: OpR_RR_pat<C2_xor,         Xor, i1, I1>;
+def: OpR_RR_pat<C2_andn,  Not2<And>, i1, I1>;
+def: OpR_RR_pat<C2_orn,    Not2<Or>, i1, I1>;
+
+def: AccRRR_pat<C4_and_and,   And,       Su<And>, I1, I1, I1>;
+def: AccRRR_pat<C4_and_or,    And,       Su< Or>, I1, I1, I1>;
+def: AccRRR_pat<C4_or_and,     Or,       Su<And>, I1, I1, I1>;
+def: AccRRR_pat<C4_or_or,      Or,       Su< Or>, I1, I1, I1>;
+def: AccRRR_pat<C4_and_andn,  And, Su<Not2<And>>, I1, I1, I1>;
+def: AccRRR_pat<C4_and_orn,   And, Su<Not2< Or>>, I1, I1, I1>;
+def: AccRRR_pat<C4_or_andn,    Or, Su<Not2<And>>, I1, I1, I1>;
+def: AccRRR_pat<C4_or_orn,     Or, Su<Not2< Or>>, I1, I1, I1>;
+
+multiclass BoolvOpR_RR_pat<InstHexagon MI, PatFrag VOp> {
+  def: OpR_RR_pat<MI, VOp, v2i1, V2I1>;
+  def: OpR_RR_pat<MI, VOp, v4i1, V4I1>;
+  def: OpR_RR_pat<MI, VOp, v8i1, V8I1>;
 }
 
-multiclass BoolAccRRR_pat<InstHexagon MI, PatFrag AccOp, PatFrag Op> {
-  def: AccRRR_pat<MI, AccOp, Op,   I1,   I1,   I1>;
-  def: AccRRR_pat<MI, AccOp, Op, V2I1, V2I1, V2I1>;
-  def: AccRRR_pat<MI, AccOp, Op, V4I1, V4I1, V4I1>;
-  def: AccRRR_pat<MI, AccOp, Op, V8I1, V8I1, V8I1>;
+multiclass BoolvAccRRR_pat<InstHexagon MI, PatFrag AccOp, PatFrag VOp> {
+  def: AccRRR_pat<MI, AccOp, VOp, V2I1, V2I1, V2I1>;
+  def: AccRRR_pat<MI, AccOp, VOp, V4I1, V4I1, V4I1>;
+  def: AccRRR_pat<MI, AccOp, VOp, V8I1, V8I1, V8I1>;
 }
 
-defm: BoolOpR_RR_pat<C2_and,   And>;
-defm: BoolOpR_RR_pat<C2_or,    Or>;
-defm: BoolOpR_RR_pat<C2_xor,   Xor>;
-defm: BoolOpR_RR_pat<C2_andn,  Not2<And>>;
-defm: BoolOpR_RR_pat<C2_orn,   Not2<Or>>;
+defm: BoolvOpR_RR_pat<C2_and,                    And>;
+defm: BoolvOpR_RR_pat<C2_or,                      Or>;
+defm: BoolvOpR_RR_pat<C2_xor,                    Xor>;
+defm: BoolvOpR_RR_pat<C2_andn,      VNot2<And, pnot>>;
+defm: BoolvOpR_RR_pat<C2_orn,       VNot2< Or, pnot>>;
 
 // op(Ps, op(Pt, Pu))
-defm: BoolAccRRR_pat<C4_and_and,   And, Su<And>>;
-defm: BoolAccRRR_pat<C4_and_or,    And, Su<Or>>;
-defm: BoolAccRRR_pat<C4_or_and,    Or,  Su<And>>;
-defm: BoolAccRRR_pat<C4_or_or,     Or,  Su<Or>>;
-
-// op(Ps, op(Pt, ~Pu))
-defm: BoolAccRRR_pat<C4_and_andn,  And, Su<Not2<And>>>;
-defm: BoolAccRRR_pat<C4_and_orn,   And, Su<Not2<Or>>>;
-defm: BoolAccRRR_pat<C4_or_andn,   Or,  Su<Not2<And>>>;
-defm: BoolAccRRR_pat<C4_or_orn,    Or,  Su<Not2<Or>>>;
+defm: BoolvAccRRR_pat<C4_and_and,   And, Su<And>>;
+defm: BoolvAccRRR_pat<C4_and_or,    And, Su<Or>>;
+defm: BoolvAccRRR_pat<C4_or_and,    Or,  Su<And>>;
+defm: BoolvAccRRR_pat<C4_or_or,     Or,  Su<Or>>;
+
+// op(Ps, op(Pt, !Pu))
+defm: BoolvAccRRR_pat<C4_and_andn,  And, Su<VNot2<And, pnot>>>;
+defm: BoolvAccRRR_pat<C4_and_orn,   And, Su<VNot2< Or, pnot>>>;
+defm: BoolvAccRRR_pat<C4_or_andn,   Or,  Su<VNot2<And, pnot>>>;
+defm: BoolvAccRRR_pat<C4_or_orn,    Or,  Su<VNot2< Or, pnot>>>;
 
 
 // --(5) Compare ---------------------------------------------------------

diff --git a/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td b/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
index c2875bc8b1c0..a22a3f8ec0ca 100644
--- a/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
+++ b/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
@@ -566,32 +566,32 @@ let Predicates = [UseHVX] in {
   def: Pat<(qnot HQ16:$Qs), (V6_pred_not HvxQR:$Qs)>;
   def: Pat<(qnot HQ32:$Qs), (V6_pred_not HvxQR:$Qs)>;
 
-  def: OpR_RR_pat<V6_pred_and,         And,  VecQ8,   HQ8>;
-  def: OpR_RR_pat<V6_pred_and,         And, VecQ16,  HQ16>;
-  def: OpR_RR_pat<V6_pred_and,         And, VecQ32,  HQ32>;
-  def: OpR_RR_pat<V6_pred_or,           Or,  VecQ8,   HQ8>;
-  def: OpR_RR_pat<V6_pred_or,           Or, VecQ16,  HQ16>;
-  def: OpR_RR_pat<V6_pred_or,           Or, VecQ32,  HQ32>;
-  def: OpR_RR_pat<V6_pred_xor,         Xor,  VecQ8,   HQ8>;
-  def: OpR_RR_pat<V6_pred_xor,         Xor, VecQ16,  HQ16>;
-  def: OpR_RR_pat<V6_pred_xor,         Xor, VecQ32,  HQ32>;
-
-  def: OpR_RR_pat<V6_pred_and_n, Not2<And>,  VecQ8,   HQ8>;
-  def: OpR_RR_pat<V6_pred_and_n, Not2<And>, VecQ16,  HQ16>;
-  def: OpR_RR_pat<V6_pred_and_n, Not2<And>, VecQ32,  HQ32>;
-  def: OpR_RR_pat<V6_pred_or_n,   Not2<Or>,  VecQ8,   HQ8>;
-  def: OpR_RR_pat<V6_pred_or_n,   Not2<Or>, VecQ16,  HQ16>;
-  def: OpR_RR_pat<V6_pred_or_n,   Not2<Or>, VecQ32,  HQ32>;
-
-  def: OpR_RR_pat<V6_veqb,              seteq,  VecQ8,  HVI8>;
-  def: OpR_RR_pat<V6_veqh,              seteq, VecQ16, HVI16>;
-  def: OpR_RR_pat<V6_veqw,              seteq, VecQ32, HVI32>;
-  def: OpR_RR_pat<V6_vgtb,              setgt,  VecQ8,  HVI8>;
-  def: OpR_RR_pat<V6_vgth,              setgt, VecQ16, HVI16>;
-  def: OpR_RR_pat<V6_vgtw,              setgt, VecQ32, HVI32>;
-  def: OpR_RR_pat<V6_vgtub,            setugt,  VecQ8,  HVI8>;
-  def: OpR_RR_pat<V6_vgtuh,            setugt, VecQ16, HVI16>;
-  def: OpR_RR_pat<V6_vgtuw,            setugt, VecQ32, HVI32>;
+  def: OpR_RR_pat<V6_pred_and,  And,  VecQ8,   HQ8>;
+  def: OpR_RR_pat<V6_pred_and,  And, VecQ16,  HQ16>;
+  def: OpR_RR_pat<V6_pred_and,  And, VecQ32,  HQ32>;
+  def: OpR_RR_pat<V6_pred_or,    Or,  VecQ8,   HQ8>;
+  def: OpR_RR_pat<V6_pred_or,    Or, VecQ16,  HQ16>;
+  def: OpR_RR_pat<V6_pred_or,    Or, VecQ32,  HQ32>;
+  def: OpR_RR_pat<V6_pred_xor,  Xor,  VecQ8,   HQ8>;
+  def: OpR_RR_pat<V6_pred_xor,  Xor, VecQ16,  HQ16>;
+  def: OpR_RR_pat<V6_pred_xor,  Xor, VecQ32,  HQ32>;
+
+  def: OpR_RR_pat<V6_pred_and_n,  VNot2<And, qnot>,  VecQ8,   HQ8>;
+  def: OpR_RR_pat<V6_pred_and_n,  VNot2<And, qnot>, VecQ16,  HQ16>;
+  def: OpR_RR_pat<V6_pred_and_n,  VNot2<And, qnot>, VecQ32,  HQ32>;
+  def: OpR_RR_pat<V6_pred_or_n,    VNot2<Or, qnot>,  VecQ8,   HQ8>;
+  def: OpR_RR_pat<V6_pred_or_n,    VNot2<Or, qnot>, VecQ16,  HQ16>;
+  def: OpR_RR_pat<V6_pred_or_n,    VNot2<Or, qnot>, VecQ32,  HQ32>;
+
+  def: OpR_RR_pat<V6_veqb,      seteq,  VecQ8,  HVI8>;
+  def: OpR_RR_pat<V6_veqh,      seteq, VecQ16, HVI16>;
+  def: OpR_RR_pat<V6_veqw,      seteq, VecQ32, HVI32>;
+  def: OpR_RR_pat<V6_vgtb,      setgt,  VecQ8,  HVI8>;
+  def: OpR_RR_pat<V6_vgth,      setgt, VecQ16, HVI16>;
+  def: OpR_RR_pat<V6_vgtw,      setgt, VecQ32, HVI32>;
+  def: OpR_RR_pat<V6_vgtub,    setugt,  VecQ8,  HVI8>;
+  def: OpR_RR_pat<V6_vgtuh,    setugt, VecQ16, HVI16>;
+  def: OpR_RR_pat<V6_vgtuw,    setugt, VecQ32, HVI32>;
 
   def: AccRRR_pat<V6_veqb_and,    And,  seteq,    HQ8,  HVI8,  HVI8>;
   def: AccRRR_pat<V6_veqb_or,      Or,  seteq,    HQ8,  HVI8,  HVI8>;

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/logical-128b.ll b/llvm/test/CodeGen/Hexagon/autohvx/logical-128b.ll
new file mode 100644
index 000000000000..91f56d75fb12
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/autohvx/logical-128b.ll
@@ -0,0 +1,483 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+declare <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1>, i32) #0
+declare <32 x i1> @llvm.hexagon.V6.pred.typecast.128B.v32i1.v128i1(<128 x i1>) #0
+declare <128 x i1> @llvm.hexagon.V6.pred.typecast.128B.v128i1.v32i1(<32 x i1>) #0
+declare <64 x i1> @llvm.hexagon.V6.pred.typecast.128B.v64i1.v128i1(<128 x i1>) #0
+declare <128 x i1> @llvm.hexagon.V6.pred.typecast.128B.v128i1.v64i1(<64 x i1>) #0
+
+define <32 x i32> @f0(<32 x i32> %a0, <32 x i32> %a1) #1 {
+; CHECK-LABEL: f0:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = and(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
+  %v1 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.128B.v32i1.v128i1(<128 x i1> %v0)
+  %v2 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a1, i32 -1)
+  %v3 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.128B.v32i1.v128i1(<128 x i1> %v2)
+  %v4 = and <32 x i1> %v1, %v3
+  %v5 = call <128 x i1> @llvm.hexagon.V6.pred.typecast.128B.v128i1.v32i1(<32 x i1> %v4)
+  %v6 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v5, i32 -1)
+  ret <32 x i32> %v6
+}
+
+define <32 x i32> @f1(<32 x i32> %a0, <32 x i32> %a1) #1 {
+; CHECK-LABEL: f1:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = or(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
+  %v1 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.128B.v32i1.v128i1(<128 x i1> %v0)
+  %v2 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a1, i32 -1)
+  %v3 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.128B.v32i1.v128i1(<128 x i1> %v2)
+  %v4 = or <32 x i1> %v1, %v3
+  %v5 = call <128 x i1> @llvm.hexagon.V6.pred.typecast.128B.v128i1.v32i1(<32 x i1> %v4)
+  %v6 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v5, i32 -1)
+  ret <32 x i32> %v6
+}
+
+define <32 x i32> @f2(<32 x i32> %a0, <32 x i32> %a1) #1 {
+; CHECK-LABEL: f2:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = xor(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
+  %v1 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.128B.v32i1.v128i1(<128 x i1> %v0)
+  %v2 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a1, i32 -1)
+  %v3 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.128B.v32i1.v128i1(<128 x i1> %v2)
+  %v4 = xor <32 x i1> %v1, %v3
+  %v5 = call <128 x i1> @llvm.hexagon.V6.pred.typecast.128B.v128i1.v32i1(<32 x i1> %v4)
+  %v6 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v5, i32 -1)
+  ret <32 x i32> %v6
+}
+
+define <32 x i32> @f3(<32 x i32> %a0, <32 x i32> %a1) #1 {
+; CHECK-LABEL: f3:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = and(q0,!q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
+  %v1 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.128B.v32i1.v128i1(<128 x i1> %v0)
+  %v2 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a1, i32 -1)
+  %v3 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.128B.v32i1.v128i1(<128 x i1> %v2)
+  %v4 = xor <32 x i1> %v3, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v5 = and <32 x i1> %v1, %v4
+  %v6 = call <128 x i1> @llvm.hexagon.V6.pred.typecast.128B.v128i1.v32i1(<32 x i1> %v5)
+  %v7 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v6, i32 -1)
+  ret <32 x i32> %v7
+}
+
+define <32 x i32> @f4(<32 x i32> %a0, <32 x i32> %a1) #1 {
+; CHECK-LABEL: f4:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = or(q0,!q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
+  %v1 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.128B.v32i1.v128i1(<128 x i1> %v0)
+  %v2 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a1, i32 -1)
+  %v3 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.128B.v32i1.v128i1(<128 x i1> %v2)
+  %v4 = xor <32 x i1> %v3, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v5 = or <32 x i1> %v1, %v4
+  %v6 = call <128 x i1> @llvm.hexagon.V6.pred.typecast.128B.v128i1.v32i1(<32 x i1> %v5)
+  %v7 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v6, i32 -1)
+  ret <32 x i32> %v7
+}
+
+define <32 x i32> @f5(<32 x i32> %a0, <32 x i32> %a1) #1 {
+; CHECK-LABEL: f5:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = and(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
+  %v1 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.128B.v64i1.v128i1(<128 x i1> %v0)
+  %v2 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a1, i32 -1)
+  %v3 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.128B.v64i1.v128i1(<128 x i1> %v2)
+  %v4 = and <64 x i1> %v1, %v3
+  %v5 = call <128 x i1> @llvm.hexagon.V6.pred.typecast.128B.v128i1.v64i1(<64 x i1> %v4)
+  %v6 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v5, i32 -1)
+  ret <32 x i32> %v6
+}
+
+define <32 x i32> @f6(<32 x i32> %a0, <32 x i32> %a1) #1 {
+; CHECK-LABEL: f6:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = or(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
+  %v1 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.128B.v64i1.v128i1(<128 x i1> %v0)
+  %v2 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a1, i32 -1)
+  %v3 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.128B.v64i1.v128i1(<128 x i1> %v2)
+  %v4 = or <64 x i1> %v1, %v3
+  %v5 = call <128 x i1> @llvm.hexagon.V6.pred.typecast.128B.v128i1.v64i1(<64 x i1> %v4)
+  %v6 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v5, i32 -1)
+  ret <32 x i32> %v6
+}
+
+define <32 x i32> @f7(<32 x i32> %a0, <32 x i32> %a1) #1 {
+; CHECK-LABEL: f7:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = xor(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
+  %v1 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.128B.v64i1.v128i1(<128 x i1> %v0)
+  %v2 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a1, i32 -1)
+  %v3 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.128B.v64i1.v128i1(<128 x i1> %v2)
+  %v4 = xor <64 x i1> %v1, %v3
+  %v5 = call <128 x i1> @llvm.hexagon.V6.pred.typecast.128B.v128i1.v64i1(<64 x i1> %v4)
+  %v6 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v5, i32 -1)
+  ret <32 x i32> %v6
+}
+
+define <32 x i32> @f8(<32 x i32> %a0, <32 x i32> %a1) #1 {
+; CHECK-LABEL: f8:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = and(q0,!q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
+  %v1 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.128B.v64i1.v128i1(<128 x i1> %v0)
+  %v2 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a1, i32 -1)
+  %v3 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.128B.v64i1.v128i1(<128 x i1> %v2)
+  %v4 = xor <64 x i1> %v3, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v5 = and <64 x i1> %v1, %v4
+  %v6 = call <128 x i1> @llvm.hexagon.V6.pred.typecast.128B.v128i1.v64i1(<64 x i1> %v5)
+  %v7 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v6, i32 -1)
+  ret <32 x i32> %v7
+}
+
+define <32 x i32> @f9(<32 x i32> %a0, <32 x i32> %a1) #1 {
+; CHECK-LABEL: f9:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = or(q0,!q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
+  %v1 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.128B.v64i1.v128i1(<128 x i1> %v0)
+  %v2 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a1, i32 -1)
+  %v3 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.128B.v64i1.v128i1(<128 x i1> %v2)
+  %v4 = xor <64 x i1> %v3, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v5 = or <64 x i1> %v1, %v4
+  %v6 = call <128 x i1> @llvm.hexagon.V6.pred.typecast.128B.v128i1.v64i1(<64 x i1> %v5)
+  %v7 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v6, i32 -1)
+  ret <32 x i32> %v7
+}
+
+define <32 x i32> @f10(<32 x i32> %a0, <32 x i32> %a1) #1 {
+; CHECK-LABEL: f10:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = and(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
+  %v1 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a1, i32 -1)
+  %v2 = and <128 x i1> %v0, %v1
+  %v3 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v2, i32 -1)
+  ret <32 x i32> %v3
+}
+
+define <32 x i32> @f11(<32 x i32> %a0, <32 x i32> %a1) #1 {
+; CHECK-LABEL: f11:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = or(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
+  %v1 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a1, i32 -1)
+  %v2 = or <128 x i1> %v0, %v1
+  %v3 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v2, i32 -1)
+  ret <32 x i32> %v3
+}
+
+define <32 x i32> @f12(<32 x i32> %a0, <32 x i32> %a1) #1 {
+; CHECK-LABEL: f12:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = xor(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
+  %v1 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a1, i32 -1)
+  %v2 = xor <128 x i1> %v0, %v1
+  %v3 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v2, i32 -1)
+  ret <32 x i32> %v3
+}
+
+define <32 x i32> @f13(<32 x i32> %a0, <32 x i32> %a1) #1 {
+; CHECK-LABEL: f13:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = and(q0,!q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
+  %v1 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a1, i32 -1)
+  %v2 = xor <128 x i1> %v1, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v3 = and <128 x i1> %v0, %v2
+  %v4 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v3, i32 -1)
+  ret <32 x i32> %v4
+}
+
+define <32 x i32> @f14(<32 x i32> %a0, <32 x i32> %a1) #1 {
+; CHECK-LABEL: f14:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = or(q0,!q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a0, i32 -1)
+  %v1 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %a1, i32 -1)
+  %v2 = xor <128 x i1> %v1, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v3 = or <128 x i1> %v0, %v2
+  %v4 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v3, i32 -1)
+  ret <32 x i32> %v4
+}
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind "target-cpu"="hexagonv66" "target-features"="+hvxv66,+hvx-length128b,-packets" }

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/logical-64b.ll b/llvm/test/CodeGen/Hexagon/autohvx/logical-64b.ll
new file mode 100644
index 000000000000..e782b4da0852
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/autohvx/logical-64b.ll
@@ -0,0 +1,483 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+declare <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1>, i32) #0
+declare <16 x i1> @llvm.hexagon.V6.pred.typecast.v16i1.v64i1(<64 x i1>) #0
+declare <64 x i1> @llvm.hexagon.V6.pred.typecast.v64i1.v16i1(<16 x i1>) #0
+declare <32 x i1> @llvm.hexagon.V6.pred.typecast.v32i1.v64i1(<64 x i1>) #0
+declare <64 x i1> @llvm.hexagon.V6.pred.typecast.v64i1.v32i1(<32 x i1>) #0
+
+define <16 x i32> @f0(<16 x i32> %a0, <16 x i32> %a1) #1 {
+; CHECK-LABEL: f0:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = and(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
+  %v1 = call <16 x i1> @llvm.hexagon.V6.pred.typecast.v16i1.v64i1(<64 x i1> %v0)
+  %v2 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a1, i32 -1)
+  %v3 = call <16 x i1> @llvm.hexagon.V6.pred.typecast.v16i1.v64i1(<64 x i1> %v2)
+  %v4 = and <16 x i1> %v1, %v3
+  %v5 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.v64i1.v16i1(<16 x i1> %v4)
+  %v6 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v5, i32 -1)
+  ret <16 x i32> %v6
+}
+
+define <16 x i32> @f1(<16 x i32> %a0, <16 x i32> %a1) #1 {
+; CHECK-LABEL: f1:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = or(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
+  %v1 = call <16 x i1> @llvm.hexagon.V6.pred.typecast.v16i1.v64i1(<64 x i1> %v0)
+  %v2 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a1, i32 -1)
+  %v3 = call <16 x i1> @llvm.hexagon.V6.pred.typecast.v16i1.v64i1(<64 x i1> %v2)
+  %v4 = or <16 x i1> %v1, %v3
+  %v5 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.v64i1.v16i1(<16 x i1> %v4)
+  %v6 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v5, i32 -1)
+  ret <16 x i32> %v6
+}
+
+define <16 x i32> @f2(<16 x i32> %a0, <16 x i32> %a1) #1 {
+; CHECK-LABEL: f2:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = xor(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
+  %v1 = call <16 x i1> @llvm.hexagon.V6.pred.typecast.v16i1.v64i1(<64 x i1> %v0)
+  %v2 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a1, i32 -1)
+  %v3 = call <16 x i1> @llvm.hexagon.V6.pred.typecast.v16i1.v64i1(<64 x i1> %v2)
+  %v4 = xor <16 x i1> %v1, %v3
+  %v5 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.v64i1.v16i1(<16 x i1> %v4)
+  %v6 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v5, i32 -1)
+  ret <16 x i32> %v6
+}
+
+define <16 x i32> @f3(<16 x i32> %a0, <16 x i32> %a1) #1 {
+; CHECK-LABEL: f3:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = and(q0,!q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
+  %v1 = call <16 x i1> @llvm.hexagon.V6.pred.typecast.v16i1.v64i1(<64 x i1> %v0)
+  %v2 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a1, i32 -1)
+  %v3 = call <16 x i1> @llvm.hexagon.V6.pred.typecast.v16i1.v64i1(<64 x i1> %v2)
+  %v4 = xor <16 x i1> %v3, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v5 = and <16 x i1> %v1, %v4
+  %v6 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.v64i1.v16i1(<16 x i1> %v5)
+  %v7 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v6, i32 -1)
+  ret <16 x i32> %v7
+}
+
+define <16 x i32> @f4(<16 x i32> %a0, <16 x i32> %a1) #1 {
+; CHECK-LABEL: f4:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = or(q0,!q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
+  %v1 = call <16 x i1> @llvm.hexagon.V6.pred.typecast.v16i1.v64i1(<64 x i1> %v0)
+  %v2 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a1, i32 -1)
+  %v3 = call <16 x i1> @llvm.hexagon.V6.pred.typecast.v16i1.v64i1(<64 x i1> %v2)
+  %v4 = xor <16 x i1> %v3, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v5 = or <16 x i1> %v1, %v4
+  %v6 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.v64i1.v16i1(<16 x i1> %v5)
+  %v7 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v6, i32 -1)
+  ret <16 x i32> %v7
+}
+
+define <16 x i32> @f5(<16 x i32> %a0, <16 x i32> %a1) #1 {
+; CHECK-LABEL: f5:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = and(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
+  %v1 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.v32i1.v64i1(<64 x i1> %v0)
+  %v2 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a1, i32 -1)
+  %v3 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.v32i1.v64i1(<64 x i1> %v2)
+  %v4 = and <32 x i1> %v1, %v3
+  %v5 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.v64i1.v32i1(<32 x i1> %v4)
+  %v6 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v5, i32 -1)
+  ret <16 x i32> %v6
+}
+
+define <16 x i32> @f6(<16 x i32> %a0, <16 x i32> %a1) #1 {
+; CHECK-LABEL: f6:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = or(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
+  %v1 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.v32i1.v64i1(<64 x i1> %v0)
+  %v2 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a1, i32 -1)
+  %v3 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.v32i1.v64i1(<64 x i1> %v2)
+  %v4 = or <32 x i1> %v1, %v3
+  %v5 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.v64i1.v32i1(<32 x i1> %v4)
+  %v6 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v5, i32 -1)
+  ret <16 x i32> %v6
+}
+
+define <16 x i32> @f7(<16 x i32> %a0, <16 x i32> %a1) #1 {
+; CHECK-LABEL: f7:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = xor(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
+  %v1 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.v32i1.v64i1(<64 x i1> %v0)
+  %v2 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a1, i32 -1)
+  %v3 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.v32i1.v64i1(<64 x i1> %v2)
+  %v4 = xor <32 x i1> %v1, %v3
+  %v5 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.v64i1.v32i1(<32 x i1> %v4)
+  %v6 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v5, i32 -1)
+  ret <16 x i32> %v6
+}
+
+define <16 x i32> @f8(<16 x i32> %a0, <16 x i32> %a1) #1 {
+; CHECK-LABEL: f8:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = and(q0,!q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
+  %v1 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.v32i1.v64i1(<64 x i1> %v0)
+  %v2 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a1, i32 -1)
+  %v3 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.v32i1.v64i1(<64 x i1> %v2)
+  %v4 = xor <32 x i1> %v3, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v5 = and <32 x i1> %v1, %v4
+  %v6 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.v64i1.v32i1(<32 x i1> %v5)
+  %v7 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v6, i32 -1)
+  ret <16 x i32> %v7
+}
+
+define <16 x i32> @f9(<16 x i32> %a0, <16 x i32> %a1) #1 {
+; CHECK-LABEL: f9:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = or(q0,!q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
+  %v1 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.v32i1.v64i1(<64 x i1> %v0)
+  %v2 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a1, i32 -1)
+  %v3 = call <32 x i1> @llvm.hexagon.V6.pred.typecast.v32i1.v64i1(<64 x i1> %v2)
+  %v4 = xor <32 x i1> %v3, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v5 = or <32 x i1> %v1, %v4
+  %v6 = call <64 x i1> @llvm.hexagon.V6.pred.typecast.v64i1.v32i1(<32 x i1> %v5)
+  %v7 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v6, i32 -1)
+  ret <16 x i32> %v7
+}
+
+define <16 x i32> @f10(<16 x i32> %a0, <16 x i32> %a1) #1 {
+; CHECK-LABEL: f10:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = and(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
+  %v1 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a1, i32 -1)
+  %v2 = and <64 x i1> %v0, %v1
+  %v3 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
+  ret <16 x i32> %v3
+}
+
+define <16 x i32> @f11(<16 x i32> %a0, <16 x i32> %a1) #1 {
+; CHECK-LABEL: f11:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = or(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
+  %v1 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a1, i32 -1)
+  %v2 = or <64 x i1> %v0, %v1
+  %v3 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
+  ret <16 x i32> %v3
+}
+
+define <16 x i32> @f12(<16 x i32> %a0, <16 x i32> %a1) #1 {
+; CHECK-LABEL: f12:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = xor(q0,q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
+  %v1 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a1, i32 -1)
+  %v2 = xor <64 x i1> %v0, %v1
+  %v3 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v2, i32 -1)
+  ret <16 x i32> %v3
+}
+
+define <16 x i32> @f13(<16 x i32> %a0, <16 x i32> %a1) #1 {
+; CHECK-LABEL: f13:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = and(q0,!q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
+  %v1 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a1, i32 -1)
+  %v2 = xor <64 x i1> %v1, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v3 = and <64 x i1> %v0, %v2
+  %v4 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v3, i32 -1)
+  ret <16 x i32> %v4
+}
+
+define <16 x i32> @f14(<16 x i32> %a0, <16 x i32> %a1) #1 {
+; CHECK-LABEL: f14:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #-1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q1 = vand(v1,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = or(q0,!q1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a0, i32 -1)
+  %v1 = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a1, i32 -1)
+  %v2 = xor <64 x i1> %v1, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v3 = or <64 x i1> %v0, %v2
+  %v4 = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %v3, i32 -1)
+  ret <16 x i32> %v4
+}
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind "target-cpu"="hexagonv66" "target-features"="+hvxv66,+hvx-length64b,-packets" }

diff  --git a/llvm/test/CodeGen/Hexagon/isel/logical.ll b/llvm/test/CodeGen/Hexagon/isel/logical.ll
new file mode 100644
index 000000000000..6c6ac7e413d6
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/isel/logical.ll
@@ -0,0 +1,1897 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+declare i32 @llvm.hexagon.S2.tstbit.i(i32, i32 immarg) #0
+
+define i1 @f0(i32 %a0, i32 %a1) #1 {
+; CHECK-LABEL: f0:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p0,p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r2 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r2,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
+  %v1 = trunc i32 %v0 to i1
+  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
+  %v3 = trunc i32 %v2 to i1
+  %v4 = and i1 %v1, %v3
+  ret i1 %v4
+}
+
+define i1 @f1(i32 %a0, i32 %a1) #1 {
+; CHECK-LABEL: f1:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p0,p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r2 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r2,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
+  %v1 = trunc i32 %v0 to i1
+  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
+  %v3 = trunc i32 %v2 to i1
+  %v4 = or i1 %v1, %v3
+  ret i1 %v4
+}
+
+define i1 @f2(i32 %a0, i32 %a1) #1 {
+; CHECK-LABEL: f2:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = xor(p0,p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r2 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r2,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
+  %v1 = trunc i32 %v0 to i1
+  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
+  %v3 = trunc i32 %v2 to i1
+  %v4 = xor i1 %v1, %v3
+  ret i1 %v4
+}
+
+define i1 @f3(i32 %a0, i32 %a1) #1 {
+; CHECK-LABEL: f3:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1 = p1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p0,!p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
+  %v1 = trunc i32 %v0 to i1
+  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
+  %v3 = trunc i32 %v2 to i1
+  %v4 = xor i1 %v3, true
+  %v5 = and i1 %v1, %v4
+  ret i1 %v5
+}
+
+define i1 @f4(i32 %a0, i32 %a1) #1 {
+; CHECK-LABEL: f4:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1 = p1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p0,!p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
+  %v1 = trunc i32 %v0 to i1
+  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
+  %v3 = trunc i32 %v2 to i1
+  %v4 = xor i1 %v3, true
+  %v5 = or i1 %v1, %v4
+  ret i1 %v5
+}
+
+define i1 @f5(i32 %a0, i32 %a1, i32 %a2) #1 {
+; CHECK-LABEL: f5:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = tstbit(r2,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p2,and(p0,p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r2 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r2,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
+  %v1 = trunc i32 %v0 to i1
+  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
+  %v3 = trunc i32 %v2 to i1
+  %v4 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a2, i32 3)
+  %v5 = trunc i32 %v4 to i1
+  %v6 = and i1 %v1, %v3
+  %v7 = and i1 %v5, %v6
+  ret i1 %v7
+}
+
+define i1 @f6(i32 %a0, i32 %a1, i32 %a2) #1 {
+; CHECK-LABEL: f6:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = tstbit(r2,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p2,or(p0,p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r2 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r2,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
+  %v1 = trunc i32 %v0 to i1
+  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
+  %v3 = trunc i32 %v2 to i1
+  %v4 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a2, i32 3)
+  %v5 = trunc i32 %v4 to i1
+  %v6 = or i1 %v1, %v3
+  %v7 = and i1 %v5, %v6
+  ret i1 %v7
+}
+
+define i1 @f7(i32 %a0, i32 %a1, i32 %a2) #1 {
+; CHECK-LABEL: f7:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = tstbit(r2,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p2,and(p0,p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r2 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r2,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
+  %v1 = trunc i32 %v0 to i1
+  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
+  %v3 = trunc i32 %v2 to i1
+  %v4 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a2, i32 3)
+  %v5 = trunc i32 %v4 to i1
+  %v6 = and i1 %v1, %v3
+  %v7 = or i1 %v5, %v6
+  ret i1 %v7
+}
+
+define i1 @f8(i32 %a0, i32 %a1, i32 %a2) #1 {
+; CHECK-LABEL: f8:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = tstbit(r2,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p2,or(p0,p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r2 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r2,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
+  %v1 = trunc i32 %v0 to i1
+  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
+  %v3 = trunc i32 %v2 to i1
+  %v4 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a2, i32 3)
+  %v5 = trunc i32 %v4 to i1
+  %v6 = or i1 %v1, %v3
+  %v7 = or i1 %v5, %v6
+  ret i1 %v7
+}
+
+define i1 @f9(i32 %a0, i32 %a1, i32 %a2) #1 {
+; CHECK-LABEL: f9:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = tstbit(r2,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1 = p1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r2 = p2
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = tstbit(r2,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p2,and(p0,!p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
+  %v1 = trunc i32 %v0 to i1
+  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
+  %v3 = trunc i32 %v2 to i1
+  %v4 = xor i1 %v3, true
+  %v5 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a2, i32 3)
+  %v6 = trunc i32 %v5 to i1
+  %v7 = and i1 %v1, %v4
+  %v8 = and i1 %v6, %v7
+  ret i1 %v8
+}
+
+define i1 @f10(i32 %a0, i32 %a1, i32 %a2) #1 {
+; CHECK-LABEL: f10:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = tstbit(r2,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1 = p1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r2 = p2
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = tstbit(r2,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p2,or(p0,!p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
+  %v1 = trunc i32 %v0 to i1
+  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
+  %v3 = trunc i32 %v2 to i1
+  %v4 = xor i1 %v3, true
+  %v5 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a2, i32 3)
+  %v6 = trunc i32 %v5 to i1
+  %v7 = or i1 %v1, %v4
+  %v8 = and i1 %v6, %v7
+  ret i1 %v8
+}
+
+define i1 @f11(i32 %a0, i32 %a1, i32 %a2) #1 {
+; CHECK-LABEL: f11:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = tstbit(r2,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1 = p1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r2 = p2
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = tstbit(r2,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p2,and(p0,!p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
+  %v1 = trunc i32 %v0 to i1
+  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
+  %v3 = trunc i32 %v2 to i1
+  %v4 = xor i1 %v3, true
+  %v5 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a2, i32 3)
+  %v6 = trunc i32 %v5 to i1
+  %v7 = and i1 %v1, %v4
+  %v8 = or i1 %v6, %v7
+  ret i1 %v8
+}
+
+define i1 @f12(i32 %a0, i32 %a1, i32 %a2) #1 {
+; CHECK-LABEL: f12:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = tstbit(r2,#3)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1 = p1
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r2 = p2
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = tstbit(r0,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = tstbit(r1,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = tstbit(r2,#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p2,or(p0,!p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = p0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a0, i32 3)
+  %v1 = trunc i32 %v0 to i1
+  %v2 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a1, i32 3)
+  %v3 = trunc i32 %v2 to i1
+  %v4 = xor i1 %v3, true
+  %v5 = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a2, i32 3)
+  %v6 = trunc i32 %v5 to i1
+  %v7 = or i1 %v1, %v4
+  %v8 = or i1 %v6, %v7
+  ret i1 %v8
+}
+
+define <2 x i32> @f13(<2 x i32> %a0, <2 x i32> %a1) #1 {
+; CHECK-LABEL: f13:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5:4 = combine(#1,#1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r1:0,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpw.eq(r3:2,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p0,p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
+  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
+  %v2 = and <2 x i1> %v0, %v1
+  %v3 = sext <2 x i1> %v2 to <2 x i32>
+  ret <2 x i32> %v3
+}
+
+define <2 x i32> @f14(<2 x i32> %a0, <2 x i32> %a1) #1 {
+; CHECK-LABEL: f14:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5:4 = combine(#1,#1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r1:0,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpw.eq(r3:2,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p0,p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
+  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
+  %v2 = or <2 x i1> %v0, %v1
+  %v3 = sext <2 x i1> %v2 to <2 x i32>
+  ret <2 x i32> %v3
+}
+
+define <2 x i32> @f15(<2 x i32> %a0, <2 x i32> %a1) #1 {
+; CHECK-LABEL: f15:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5:4 = combine(#1,#1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r1:0,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpw.eq(r3:2,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = xor(p0,p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
+  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
+  %v2 = xor <2 x i1> %v0, %v1
+  %v3 = sext <2 x i1> %v2 to <2 x i32>
+  ret <2 x i32> %v3
+}
+
+define <2 x i32> @f16(<2 x i32> %a0, <2 x i32> %a1) #1 {
+; CHECK-LABEL: f16:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5:4 = combine(#1,#1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r1:0,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpw.eq(r3:2,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p0,!p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
+  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
+  %v2 = xor <2 x i1> %v1, <i1 true, i1 true>
+  %v3 = and <2 x i1> %v0, %v2
+  %v4 = sext <2 x i1> %v3 to <2 x i32>
+  ret <2 x i32> %v4
+}
+
+define <2 x i32> @f17(<2 x i32> %a0, <2 x i32> %a1) #1 {
+; CHECK-LABEL: f17:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5:4 = combine(#1,#1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r1:0,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpw.eq(r3:2,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p0,!p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
+  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
+  %v2 = xor <2 x i1> %v1, <i1 true, i1 true>
+  %v3 = or <2 x i1> %v0, %v2
+  %v4 = sext <2 x i1> %v3 to <2 x i32>
+  ret <2 x i32> %v4
+}
+
+define <2 x i32> @f18(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2) #1 {
+; CHECK-LABEL: f18:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7:6 = combine(#1,#1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpw.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmpw.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p2,and(p0,p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
+  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
+  %v2 = icmp eq <2 x i32> %a2, <i32 1, i32 1>
+  %v3 = and <2 x i1> %v0, %v1
+  %v4 = and <2 x i1> %v2, %v3
+  %v5 = sext <2 x i1> %v4 to <2 x i32>
+  ret <2 x i32> %v5
+}
+
+define <2 x i32> @f19(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2) #1 {
+; CHECK-LABEL: f19:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7:6 = combine(#1,#1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpw.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmpw.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p2,or(p0,p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
+  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
+  %v2 = icmp eq <2 x i32> %a2, <i32 1, i32 1>
+  %v3 = or <2 x i1> %v0, %v1
+  %v4 = and <2 x i1> %v2, %v3
+  %v5 = sext <2 x i1> %v4 to <2 x i32>
+  ret <2 x i32> %v5
+}
+
+define <2 x i32> @f20(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2) #1 {
+; CHECK-LABEL: f20:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7:6 = combine(#1,#1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpw.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmpw.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p2,and(p0,p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
+  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
+  %v2 = icmp eq <2 x i32> %a2, <i32 1, i32 1>
+  %v3 = and <2 x i1> %v0, %v1
+  %v4 = or <2 x i1> %v2, %v3
+  %v5 = sext <2 x i1> %v4 to <2 x i32>
+  ret <2 x i32> %v5
+}
+
+define <2 x i32> @f21(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2) #1 {
+; CHECK-LABEL: f21:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7:6 = combine(#1,#1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpw.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmpw.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p2,or(p0,p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
+  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
+  %v2 = icmp eq <2 x i32> %a2, <i32 1, i32 1>
+  %v3 = or <2 x i1> %v0, %v1
+  %v4 = or <2 x i1> %v2, %v3
+  %v5 = sext <2 x i1> %v4 to <2 x i32>
+  ret <2 x i32> %v5
+}
+
+define <2 x i32> @f22(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2) #1 {
+; CHECK-LABEL: f22:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7:6 = combine(#1,#1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpw.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmpw.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p2,and(p0,!p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
+  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
+  %v2 = xor <2 x i1> %v1, <i1 true, i1 true>
+  %v3 = icmp eq <2 x i32> %a2, <i32 1, i32 1>
+  %v4 = and <2 x i1> %v0, %v2
+  %v5 = and <2 x i1> %v3, %v4
+  %v6 = sext <2 x i1> %v5 to <2 x i32>
+  ret <2 x i32> %v6
+}
+
+define <2 x i32> @f23(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2) #1 {
+; CHECK-LABEL: f23:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7:6 = combine(#1,#1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpw.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmpw.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p2,or(p0,!p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
+  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
+  %v2 = xor <2 x i1> %v1, <i1 true, i1 true>
+  %v3 = icmp eq <2 x i32> %a2, <i32 1, i32 1>
+  %v4 = or <2 x i1> %v0, %v2
+  %v5 = and <2 x i1> %v3, %v4
+  %v6 = sext <2 x i1> %v5 to <2 x i32>
+  ret <2 x i32> %v6
+}
+
+define <2 x i32> @f24(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2) #1 {
+; CHECK-LABEL: f24:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7:6 = combine(#1,#1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpw.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmpw.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p2,and(p0,!p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
+  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
+  %v2 = xor <2 x i1> %v1, <i1 true, i1 true>
+  %v3 = icmp eq <2 x i32> %a2, <i32 1, i32 1>
+  %v4 = and <2 x i1> %v0, %v2
+  %v5 = or <2 x i1> %v3, %v4
+  %v6 = sext <2 x i1> %v5 to <2 x i32>
+  ret <2 x i32> %v6
+}
+
+define <2 x i32> @f25(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2) #1 {
+; CHECK-LABEL: f25:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7:6 = combine(#1,#1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpw.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmpw.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p2,or(p0,!p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <2 x i32> %a0, <i32 1, i32 1>
+  %v1 = icmp eq <2 x i32> %a1, <i32 1, i32 1>
+  %v2 = xor <2 x i1> %v1, <i1 true, i1 true>
+  %v3 = icmp eq <2 x i32> %a2, <i32 1, i32 1>
+  %v4 = or <2 x i1> %v0, %v2
+  %v5 = or <2 x i1> %v3, %v4
+  %v6 = sext <2 x i1> %v5 to <2 x i32>
+  ret <2 x i32> %v6
+}
+
+define <4 x i16> @f26(<4 x i16> %a0, <4 x i16> %a1) #1 {
+; CHECK-LABEL: f26:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r4 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmph.eq(r1:0,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmph.eq(r3:2,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p0,p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
+  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
+  %v2 = and <4 x i1> %v0, %v1
+  %v3 = sext <4 x i1> %v2 to <4 x i16>
+  ret <4 x i16> %v3
+}
+
+define <4 x i16> @f27(<4 x i16> %a0, <4 x i16> %a1) #1 {
+; CHECK-LABEL: f27:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r4 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmph.eq(r1:0,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmph.eq(r3:2,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p0,p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
+  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
+  %v2 = or <4 x i1> %v0, %v1
+  %v3 = sext <4 x i1> %v2 to <4 x i16>
+  ret <4 x i16> %v3
+}
+
+define <4 x i16> @f28(<4 x i16> %a0, <4 x i16> %a1) #1 {
+; CHECK-LABEL: f28:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r4 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmph.eq(r1:0,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmph.eq(r3:2,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = xor(p0,p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
+  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
+  %v2 = xor <4 x i1> %v0, %v1
+  %v3 = sext <4 x i1> %v2 to <4 x i16>
+  ret <4 x i16> %v3
+}
+
+define <4 x i16> @f29(<4 x i16> %a0, <4 x i16> %a1) #1 {
+; CHECK-LABEL: f29:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r4 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmph.eq(r1:0,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmph.eq(r3:2,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p0,!p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
+  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
+  %v2 = xor <4 x i1> %v1, <i1 true, i1 true, i1 true, i1 true>
+  %v3 = and <4 x i1> %v0, %v2
+  %v4 = sext <4 x i1> %v3 to <4 x i16>
+  ret <4 x i16> %v4
+}
+
+define <4 x i16> @f30(<4 x i16> %a0, <4 x i16> %a1) #1 {
+; CHECK-LABEL: f30:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r4 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmph.eq(r1:0,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmph.eq(r3:2,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p0,!p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
+  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
+  %v2 = xor <4 x i1> %v1, <i1 true, i1 true, i1 true, i1 true>
+  %v3 = or <4 x i1> %v0, %v2
+  %v4 = sext <4 x i1> %v3 to <4 x i16>
+  ret <4 x i16> %v4
+}
+
+define <4 x i16> @f31(<4 x i16> %a0, <4 x i16> %a1, <4 x i16> %a2) #1 {
+; CHECK-LABEL: f31:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r6 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmph.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmph.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmph.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p2,and(p0,p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
+  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
+  %v2 = icmp eq <4 x i16> %a2, <i16 1, i16 1, i16 1, i16 1>
+  %v3 = and <4 x i1> %v0, %v1
+  %v4 = and <4 x i1> %v2, %v3
+  %v5 = sext <4 x i1> %v4 to <4 x i16>
+  ret <4 x i16> %v5
+}
+
+define <4 x i16> @f32(<4 x i16> %a0, <4 x i16> %a1, <4 x i16> %a2) #1 {
+; CHECK-LABEL: f32:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r6 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmph.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmph.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmph.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p2,or(p0,p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
+  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
+  %v2 = icmp eq <4 x i16> %a2, <i16 1, i16 1, i16 1, i16 1>
+  %v3 = or <4 x i1> %v0, %v1
+  %v4 = and <4 x i1> %v2, %v3
+  %v5 = sext <4 x i1> %v4 to <4 x i16>
+  ret <4 x i16> %v5
+}
+
+define <4 x i16> @f33(<4 x i16> %a0, <4 x i16> %a1, <4 x i16> %a2) #1 {
+; CHECK-LABEL: f33:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r6 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmph.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmph.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmph.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p2,and(p0,p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
+  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
+  %v2 = icmp eq <4 x i16> %a2, <i16 1, i16 1, i16 1, i16 1>
+  %v3 = and <4 x i1> %v0, %v1
+  %v4 = or <4 x i1> %v2, %v3
+  %v5 = sext <4 x i1> %v4 to <4 x i16>
+  ret <4 x i16> %v5
+}
+
+define <4 x i16> @f34(<4 x i16> %a0, <4 x i16> %a1, <4 x i16> %a2) #1 {
+; CHECK-LABEL: f34:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r6 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmph.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmph.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmph.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p2,or(p0,p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
+  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
+  %v2 = icmp eq <4 x i16> %a2, <i16 1, i16 1, i16 1, i16 1>
+  %v3 = or <4 x i1> %v0, %v1
+  %v4 = or <4 x i1> %v2, %v3
+  %v5 = sext <4 x i1> %v4 to <4 x i16>
+  ret <4 x i16> %v5
+}
+
+define <4 x i16> @f35(<4 x i16> %a0, <4 x i16> %a1, <4 x i16> %a2) #1 {
+; CHECK-LABEL: f35:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r6 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmph.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmph.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmph.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p2,and(p0,!p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
+  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
+  %v2 = xor <4 x i1> %v1, <i1 true, i1 true, i1 true, i1 true>
+  %v3 = icmp eq <4 x i16> %a2, <i16 1, i16 1, i16 1, i16 1>
+  %v4 = and <4 x i1> %v0, %v2
+  %v5 = and <4 x i1> %v3, %v4
+  %v6 = sext <4 x i1> %v5 to <4 x i16>
+  ret <4 x i16> %v6
+}
+
+define <4 x i16> @f36(<4 x i16> %a0, <4 x i16> %a1, <4 x i16> %a2) #1 {
+; CHECK-LABEL: f36:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r6 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmph.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmph.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmph.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p2,or(p0,!p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
+  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
+  %v2 = xor <4 x i1> %v1, <i1 true, i1 true, i1 true, i1 true>
+  %v3 = icmp eq <4 x i16> %a2, <i16 1, i16 1, i16 1, i16 1>
+  %v4 = or <4 x i1> %v0, %v2
+  %v5 = and <4 x i1> %v3, %v4
+  %v6 = sext <4 x i1> %v5 to <4 x i16>
+  ret <4 x i16> %v6
+}
+
+define <4 x i16> @f37(<4 x i16> %a0, <4 x i16> %a1, <4 x i16> %a2) #1 {
+; CHECK-LABEL: f37:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r6 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmph.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmph.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmph.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p2,and(p0,!p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
+  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
+  %v2 = xor <4 x i1> %v1, <i1 true, i1 true, i1 true, i1 true>
+  %v3 = icmp eq <4 x i16> %a2, <i16 1, i16 1, i16 1, i16 1>
+  %v4 = and <4 x i1> %v0, %v2
+  %v5 = or <4 x i1> %v3, %v4
+  %v6 = sext <4 x i1> %v5 to <4 x i16>
+  ret <4 x i16> %v6
+}
+
+define <4 x i16> @f38(<4 x i16> %a0, <4 x i16> %a1, <4 x i16> %a2) #1 {
+; CHECK-LABEL: f38:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r6 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7 = ##65537
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmph.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmph.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmph.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p2,or(p0,!p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <4 x i16> %a0, <i16 1, i16 1, i16 1, i16 1>
+  %v1 = icmp eq <4 x i16> %a1, <i16 1, i16 1, i16 1, i16 1>
+  %v2 = xor <4 x i1> %v1, <i1 true, i1 true, i1 true, i1 true>
+  %v3 = icmp eq <4 x i16> %a2, <i16 1, i16 1, i16 1, i16 1>
+  %v4 = or <4 x i1> %v0, %v2
+  %v5 = or <4 x i1> %v3, %v4
+  %v6 = sext <4 x i1> %v5 to <4 x i16>
+  ret <4 x i16> %v6
+}
+
+define <8 x i8> @f39(<8 x i8> %a0, <8 x i8> %a1) #1 {
+; CHECK-LABEL: f39:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r4 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpb.eq(r1:0,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpb.eq(r3:2,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p0,p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v2 = and <8 x i1> %v0, %v1
+  %v3 = sext <8 x i1> %v2 to <8 x i8>
+  ret <8 x i8> %v3
+}
+
+define <8 x i8> @f40(<8 x i8> %a0, <8 x i8> %a1) #1 {
+; CHECK-LABEL: f40:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r4 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpb.eq(r1:0,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpb.eq(r3:2,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p0,p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v2 = or <8 x i1> %v0, %v1
+  %v3 = sext <8 x i1> %v2 to <8 x i8>
+  ret <8 x i8> %v3
+}
+
+define <8 x i8> @f41(<8 x i8> %a0, <8 x i8> %a1) #1 {
+; CHECK-LABEL: f41:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r4 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpb.eq(r1:0,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpb.eq(r3:2,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = xor(p0,p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v2 = xor <8 x i1> %v0, %v1
+  %v3 = sext <8 x i1> %v2 to <8 x i8>
+  ret <8 x i8> %v3
+}
+
+define <8 x i8> @f42(<8 x i8> %a0, <8 x i8> %a1) #1 {
+; CHECK-LABEL: f42:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r4 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpb.eq(r1:0,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpb.eq(r3:2,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p0,!p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v2 = xor <8 x i1> %v1, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v3 = and <8 x i1> %v0, %v2
+  %v4 = sext <8 x i1> %v3 to <8 x i8>
+  ret <8 x i8> %v4
+}
+
+define <8 x i8> @f43(<8 x i8> %a0, <8 x i8> %a1) #1 {
+; CHECK-LABEL: f43:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r4 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpb.eq(r1:0,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpb.eq(r3:2,r5:4)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p0,!p1)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v2 = xor <8 x i1> %v1, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v3 = or <8 x i1> %v0, %v2
+  %v4 = sext <8 x i1> %v3 to <8 x i8>
+  ret <8 x i8> %v4
+}
+
+define <8 x i8> @f44(<8 x i8> %a0, <8 x i8> %a1, <8 x i8> %a2) #1 {
+; CHECK-LABEL: f44:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r6 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpb.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpb.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmpb.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p2,and(p0,p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v2 = icmp eq <8 x i8> %a2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v3 = and <8 x i1> %v0, %v1
+  %v4 = and <8 x i1> %v2, %v3
+  %v5 = sext <8 x i1> %v4 to <8 x i8>
+  ret <8 x i8> %v5
+}
+
+define <8 x i8> @f45(<8 x i8> %a0, <8 x i8> %a1, <8 x i8> %a2) #1 {
+; CHECK-LABEL: f45:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r6 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpb.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpb.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmpb.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p2,or(p0,p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v2 = icmp eq <8 x i8> %a2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v3 = or <8 x i1> %v0, %v1
+  %v4 = and <8 x i1> %v2, %v3
+  %v5 = sext <8 x i1> %v4 to <8 x i8>
+  ret <8 x i8> %v5
+}
+
+define <8 x i8> @f46(<8 x i8> %a0, <8 x i8> %a1, <8 x i8> %a2) #1 {
+; CHECK-LABEL: f46:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r6 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpb.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpb.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmpb.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p2,and(p0,p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v2 = icmp eq <8 x i8> %a2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v3 = and <8 x i1> %v0, %v1
+  %v4 = or <8 x i1> %v2, %v3
+  %v5 = sext <8 x i1> %v4 to <8 x i8>
+  ret <8 x i8> %v5
+}
+
+define <8 x i8> @f47(<8 x i8> %a0, <8 x i8> %a1, <8 x i8> %a2) #1 {
+; CHECK-LABEL: f47:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r6 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpb.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpb.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmpb.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p2,or(p0,p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v2 = icmp eq <8 x i8> %a2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v3 = or <8 x i1> %v0, %v1
+  %v4 = or <8 x i1> %v2, %v3
+  %v5 = sext <8 x i1> %v4 to <8 x i8>
+  ret <8 x i8> %v5
+}
+
+define <8 x i8> @f48(<8 x i8> %a0, <8 x i8> %a1, <8 x i8> %a2) #1 {
+; CHECK-LABEL: f48:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r6 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpb.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpb.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmpb.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p2,and(p0,!p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v2 = xor <8 x i1> %v1, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v3 = icmp eq <8 x i8> %a2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v4 = and <8 x i1> %v0, %v2
+  %v5 = and <8 x i1> %v3, %v4
+  %v6 = sext <8 x i1> %v5 to <8 x i8>
+  ret <8 x i8> %v6
+}
+
+define <8 x i8> @f49(<8 x i8> %a0, <8 x i8> %a1, <8 x i8> %a2) #1 {
+; CHECK-LABEL: f49:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r6 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpb.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpb.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmpb.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = and(p2,or(p0,!p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v2 = xor <8 x i1> %v1, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v3 = icmp eq <8 x i8> %a2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v4 = or <8 x i1> %v0, %v2
+  %v5 = and <8 x i1> %v3, %v4
+  %v6 = sext <8 x i1> %v5 to <8 x i8>
+  ret <8 x i8> %v6
+}
+
+define <8 x i8> @f50(<8 x i8> %a0, <8 x i8> %a1, <8 x i8> %a2) #1 {
+; CHECK-LABEL: f50:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r6 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpb.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpb.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmpb.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p2,and(p0,!p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v2 = xor <8 x i1> %v1, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v3 = icmp eq <8 x i8> %a2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v4 = and <8 x i1> %v0, %v2
+  %v5 = or <8 x i1> %v3, %v4
+  %v6 = sext <8 x i1> %v5 to <8 x i8>
+  ret <8 x i8> %v6
+}
+
+define <8 x i8> @f51(<8 x i8> %a0, <8 x i8> %a1, <8 x i8> %a2) #1 {
+; CHECK-LABEL: f51:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r6 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7 = ##16843009
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpb.eq(r1:0,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpb.eq(r3:2,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p2 = vcmpb.eq(r5:4,r7:6)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = or(p2,or(p0,!p1))
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = mask(p0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
+b0:
+  %v0 = icmp eq <8 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v1 = icmp eq <8 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v2 = xor <8 x i1> %v1, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+  %v3 = icmp eq <8 x i8> %a2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %v4 = or <8 x i1> %v0, %v2
+  %v5 = or <8 x i1> %v3, %v4
+  %v6 = sext <8 x i1> %v5 to <8 x i8>
+  ret <8 x i8> %v6
+}
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind "target-features"="-small-data,-packets" }


        


More information about the llvm-commits mailing list