[clang] [llvm] Add support for flag output operand "=@cc" for SystemZ. (PR #125970)

via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 1 19:02:37 PDT 2025


https://github.com/anoopkg6 updated https://github.com/llvm/llvm-project/pull/125970

From 8a07b3dd922c11e27d922c0db7b08be449167c09 Mon Sep 17 00:00:00 2001
From: anoopkg6 <anoop.kumar6@ibm.com>
Date: Wed, 5 Feb 2025 23:57:13 +0100
Subject: [PATCH 01/12] Add support for flag output operand "=@cc" for SystemZ
 and optimize conditional branches for the 14 possible combinations of the CC mask.
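
A minimal usage sketch of the constraint this patch enables (the helper name
and the choice of instruction are illustrative only, not taken from the patch;
the CC semantics are those of the signed-add instruction AR):

  int add_sets_overflow(int x, int y) {
    int cc;
    // "=@cc" returns the resulting condition code (0-3) in 'cc'.
    asm volatile("ar %[x],%[y]" : [x] "+d"(x), "=@cc"(cc) : [y] "d"(y));
    // For AR: CC 0 = zero, 1 = negative, 2 = positive, 3 = overflow.
    return cc == 3;
  }

With this patch, a test such as 'cc == 3' can be folded into a conditional
branch on the CC mask instead of materializing the condition code via IPM; the
clang and llc tests below exercise the same constraint with 'ahi' and 'alsi'.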

---
 clang/lib/Basic/Targets/SystemZ.cpp           |   11 +
 clang/lib/Basic/Targets/SystemZ.h             |    5 +
 clang/lib/CodeGen/CGStmt.cpp                  |   10 +-
 .../CodeGen/inline-asm-systemz-flag-output.c  |  149 +
 llvm/include/llvm/CodeGen/TargetLowering.h    |    3 +
 .../SelectionDAG/SelectionDAGBuilder.cpp      |   70 +-
 .../CodeGen/SelectionDAG/TargetLowering.cpp   |    4 +
 .../Target/SystemZ/SystemZISelLowering.cpp    |  600 +-
 llvm/lib/Target/SystemZ/SystemZISelLowering.h |   14 +
 .../SystemZ/flag_output_operand_ccand.ll      |  500 ++
 .../flag_output_operand_ccand_eq_noteq.ll     |  939 +++
 .../SystemZ/flag_output_operand_ccand_not.ll  |  779 +++
 .../SystemZ/flag_output_operand_ccmixed.ll    | 2427 ++++++++
 .../flag_output_operand_ccmixed_eq_noteq.ll   | 5248 +++++++++++++++++
 .../flag_output_operand_ccmixed_not.ll        | 2543 ++++++++
 .../SystemZ/flag_output_operand_ccor.ll       | 1047 ++++
 .../flag_output_operand_ccor_eq_noteq.ll      |  854 +++
 .../SystemZ/flag_output_operand_ccor_not.ll   |  806 +++
 .../SystemZ/flag_output_operand_ccxor.ll      |  784 +++
 .../flag_output_operand_ccxor_eq_noteq.ll     | 1083 ++++
 .../SystemZ/flag_output_operand_ccxor_not.ll  |  778 +++
 21 files changed, 18641 insertions(+), 13 deletions(-)
 create mode 100644 clang/test/CodeGen/inline-asm-systemz-flag-output.c
 create mode 100644 llvm/test/CodeGen/SystemZ/flag_output_operand_ccand.ll
 create mode 100644 llvm/test/CodeGen/SystemZ/flag_output_operand_ccand_eq_noteq.ll
 create mode 100644 llvm/test/CodeGen/SystemZ/flag_output_operand_ccand_not.ll
 create mode 100644 llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed.ll
 create mode 100644 llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_eq_noteq.ll
 create mode 100644 llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_not.ll
 create mode 100644 llvm/test/CodeGen/SystemZ/flag_output_operand_ccor.ll
 create mode 100644 llvm/test/CodeGen/SystemZ/flag_output_operand_ccor_eq_noteq.ll
 create mode 100644 llvm/test/CodeGen/SystemZ/flag_output_operand_ccor_not.ll
 create mode 100644 llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor.ll
 create mode 100644 llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor_eq_noteq.ll
 create mode 100644 llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor_not.ll

diff --git a/clang/lib/Basic/Targets/SystemZ.cpp b/clang/lib/Basic/Targets/SystemZ.cpp
index 06f08db2eadd4..49f88b45220d0 100644
--- a/clang/lib/Basic/Targets/SystemZ.cpp
+++ b/clang/lib/Basic/Targets/SystemZ.cpp
@@ -90,6 +90,14 @@ bool SystemZTargetInfo::validateAsmConstraint(
   case 'T': // Likewise, plus an index
     Info.setAllowsMemory();
     return true;
+  case '@':
+    // CC condition changes.
+    if (strlen(Name) >= 3 && *(Name + 1) == 'c' && *(Name + 2) == 'c') {
+      Name += 2;
+      Info.setAllowsRegister();
+      return true;
+    }
+    return false;
   }
 }
 
@@ -150,6 +158,9 @@ unsigned SystemZTargetInfo::getMinGlobalAlign(uint64_t Size,
 
 void SystemZTargetInfo::getTargetDefines(const LangOptions &Opts,
                                          MacroBuilder &Builder) const {
+  // Inline assembly supports SystemZ flag outputs.
+  Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");
+
   Builder.defineMacro("__s390__");
   Builder.defineMacro("__s390x__");
   Builder.defineMacro("__zarch__");
diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index ef9a07033a6e4..a6909ababdec0 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -118,6 +118,11 @@ class LLVM_LIBRARY_VISIBILITY SystemZTargetInfo : public TargetInfo {
                              TargetInfo::ConstraintInfo &info) const override;
 
   std::string convertConstraint(const char *&Constraint) const override {
+    if (strncmp(Constraint, "@cc", 3) == 0) {
+      std::string Converted = "{" + std::string(Constraint, 3) + "}";
+      Constraint += 3;
+      return Converted;
+    }
     switch (Constraint[0]) {
     case 'p': // Keep 'p' constraint.
       return std::string("p");
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 41dc91c578c80..27f7bb6528958 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -2563,9 +2563,15 @@ EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
     if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
       // Target must guarantee the Value `Tmp` here is lowered to a boolean
       // value.
-      llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
+      unsigned CCUpperBound = 2;
+      if (CGF.getTarget().getTriple().getArch() == llvm::Triple::systemz) {
+        // On this target, the CC value can be in the range [0, 3].
+        CCUpperBound = 4;
+      }
+      llvm::Constant *CCUpperBoundConst =
+          llvm::ConstantInt::get(Tmp->getType(), CCUpperBound);
       llvm::Value *IsBooleanValue =
-          Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
+          Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, CCUpperBoundConst);
       llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
       Builder.CreateCall(FnAssume, IsBooleanValue);
     }
diff --git a/clang/test/CodeGen/inline-asm-systemz-flag-output.c b/clang/test/CodeGen/inline-asm-systemz-flag-output.c
new file mode 100644
index 0000000000000..ab90e031df1f2
--- /dev/null
+++ b/clang/test/CodeGen/inline-asm-systemz-flag-output.c
@@ -0,0 +1,149 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -triple s390x-linux -emit-llvm -o - %s | FileCheck %s
+// CHECK-LABEL: define dso_local signext i32 @foo_012(
+// CHECK-SAME: i32 noundef signext [[X:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*]]:
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[CC:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store i32 [[X]], ptr [[X_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X_ADDR]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 [[TMP0]]) #[[ATTR2:[0-9]+]], !srcloc [[META2:![0-9]+]]
+// CHECK-NEXT:    [[ASMRESULT:%.*]] = extractvalue { i32, i32 } [[TMP1]], 0
+// CHECK-NEXT:    [[ASMRESULT1:%.*]] = extractvalue { i32, i32 } [[TMP1]], 1
+// CHECK-NEXT:    store i32 [[ASMRESULT]], ptr [[X_ADDR]], align 4
+// CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i32 [[ASMRESULT1]], 4
+// CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
+// CHECK-NEXT:    store i32 [[ASMRESULT1]], ptr [[CC]], align 4
+// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[CC]], align 4
+// CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP3]], 0
+// CHECK-NEXT:    br i1 [[CMP]], label %[[LOR_END:.*]], label %[[LOR_LHS_FALSE:.*]]
+// CHECK:       [[LOR_LHS_FALSE]]:
+// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[CC]], align 4
+// CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[TMP4]], 1
+// CHECK-NEXT:    br i1 [[CMP2]], label %[[LOR_END]], label %[[LOR_RHS:.*]]
+// CHECK:       [[LOR_RHS]]:
+// CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[CC]], align 4
+// CHECK-NEXT:    [[CMP3:%.*]] = icmp eq i32 [[TMP5]], 2
+// CHECK-NEXT:    br label %[[LOR_END]]
+// CHECK:       [[LOR_END]]:
+// CHECK-NEXT:    [[TMP6:%.*]] = phi i1 [ true, %[[LOR_LHS_FALSE]] ], [ true, %[[ENTRY]] ], [ [[CMP3]], %[[LOR_RHS]] ]
+// CHECK-NEXT:    [[TMP7:%.*]] = zext i1 [[TMP6]] to i64
+// CHECK-NEXT:    [[COND:%.*]] = select i1 [[TMP6]], i32 42, i32 0
+// CHECK-NEXT:    ret i32 [[COND]]
+//
+int foo_012(int x) {
+  int cc;
+  asm volatile ("ahi %[x],42\n" : [x] "+d"(x), "=@cc" (cc));
+  return cc == 0 || cc == 1 || cc == 2 ? 42 : 0;
+}
+
+// CHECK-LABEL: define dso_local signext i32 @foo_013(
+// CHECK-SAME: i32 noundef signext [[X:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*]]:
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[CC:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store i32 [[X]], ptr [[X_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X_ADDR]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 [[TMP0]]) #[[ATTR2]], !srcloc [[META3:![0-9]+]]
+// CHECK-NEXT:    [[ASMRESULT:%.*]] = extractvalue { i32, i32 } [[TMP1]], 0
+// CHECK-NEXT:    [[ASMRESULT1:%.*]] = extractvalue { i32, i32 } [[TMP1]], 1
+// CHECK-NEXT:    store i32 [[ASMRESULT]], ptr [[X_ADDR]], align 4
+// CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i32 [[ASMRESULT1]], 4
+// CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
+// CHECK-NEXT:    store i32 [[ASMRESULT1]], ptr [[CC]], align 4
+// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[CC]], align 4
+// CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP3]], 0
+// CHECK-NEXT:    br i1 [[CMP]], label %[[LOR_END:.*]], label %[[LOR_LHS_FALSE:.*]]
+// CHECK:       [[LOR_LHS_FALSE]]:
+// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[CC]], align 4
+// CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[TMP4]], 1
+// CHECK-NEXT:    br i1 [[CMP2]], label %[[LOR_END]], label %[[LOR_RHS:.*]]
+// CHECK:       [[LOR_RHS]]:
+// CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[CC]], align 4
+// CHECK-NEXT:    [[CMP3:%.*]] = icmp eq i32 [[TMP5]], 3
+// CHECK-NEXT:    br label %[[LOR_END]]
+// CHECK:       [[LOR_END]]:
+// CHECK-NEXT:    [[TMP6:%.*]] = phi i1 [ true, %[[LOR_LHS_FALSE]] ], [ true, %[[ENTRY]] ], [ [[CMP3]], %[[LOR_RHS]] ]
+// CHECK-NEXT:    [[TMP7:%.*]] = zext i1 [[TMP6]] to i64
+// CHECK-NEXT:    [[COND:%.*]] = select i1 [[TMP6]], i32 42, i32 0
+// CHECK-NEXT:    ret i32 [[COND]]
+//
+int foo_013(int x) {
+  int cc;
+  asm volatile ("ahi %[x],42\n" : [x] "+d"(x), "=@cc" (cc));
+  return cc == 0 || cc == 1 || cc == 3 ? 42 : 0;
+}
+
+// CHECK-LABEL: define dso_local signext i32 @foo_023(
+// CHECK-SAME: i32 noundef signext [[X:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*]]:
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[CC:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store i32 [[X]], ptr [[X_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X_ADDR]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 [[TMP0]]) #[[ATTR2]], !srcloc [[META4:![0-9]+]]
+// CHECK-NEXT:    [[ASMRESULT:%.*]] = extractvalue { i32, i32 } [[TMP1]], 0
+// CHECK-NEXT:    [[ASMRESULT1:%.*]] = extractvalue { i32, i32 } [[TMP1]], 1
+// CHECK-NEXT:    store i32 [[ASMRESULT]], ptr [[X_ADDR]], align 4
+// CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i32 [[ASMRESULT1]], 4
+// CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
+// CHECK-NEXT:    store i32 [[ASMRESULT1]], ptr [[CC]], align 4
+// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[CC]], align 4
+// CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP3]], 0
+// CHECK-NEXT:    br i1 [[CMP]], label %[[LOR_END:.*]], label %[[LOR_LHS_FALSE:.*]]
+// CHECK:       [[LOR_LHS_FALSE]]:
+// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[CC]], align 4
+// CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[TMP4]], 2
+// CHECK-NEXT:    br i1 [[CMP2]], label %[[LOR_END]], label %[[LOR_RHS:.*]]
+// CHECK:       [[LOR_RHS]]:
+// CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[CC]], align 4
+// CHECK-NEXT:    [[CMP3:%.*]] = icmp eq i32 [[TMP5]], 3
+// CHECK-NEXT:    br label %[[LOR_END]]
+// CHECK:       [[LOR_END]]:
+// CHECK-NEXT:    [[TMP6:%.*]] = phi i1 [ true, %[[LOR_LHS_FALSE]] ], [ true, %[[ENTRY]] ], [ [[CMP3]], %[[LOR_RHS]] ]
+// CHECK-NEXT:    [[TMP7:%.*]] = zext i1 [[TMP6]] to i64
+// CHECK-NEXT:    [[COND:%.*]] = select i1 [[TMP6]], i32 42, i32 0
+// CHECK-NEXT:    ret i32 [[COND]]
+//
+int foo_023(int x) {
+  int cc;
+  asm volatile ("ahi %[x],42\n" : [x] "+d"(x), "=@cc" (cc));
+  return cc == 0 || cc == 2 || cc == 3 ? 42 : 0;
+}
+
+// CHECK-LABEL: define dso_local signext i32 @foo_123(
+// CHECK-SAME: i32 noundef signext [[X:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*]]:
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[CC:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store i32 [[X]], ptr [[X_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X_ADDR]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 [[TMP0]]) #[[ATTR2]], !srcloc [[META5:![0-9]+]]
+// CHECK-NEXT:    [[ASMRESULT:%.*]] = extractvalue { i32, i32 } [[TMP1]], 0
+// CHECK-NEXT:    [[ASMRESULT1:%.*]] = extractvalue { i32, i32 } [[TMP1]], 1
+// CHECK-NEXT:    store i32 [[ASMRESULT]], ptr [[X_ADDR]], align 4
+// CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i32 [[ASMRESULT1]], 4
+// CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
+// CHECK-NEXT:    store i32 [[ASMRESULT1]], ptr [[CC]], align 4
+// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[CC]], align 4
+// CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP3]], 1
+// CHECK-NEXT:    br i1 [[CMP]], label %[[LOR_END:.*]], label %[[LOR_LHS_FALSE:.*]]
+// CHECK:       [[LOR_LHS_FALSE]]:
+// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[CC]], align 4
+// CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[TMP4]], 2
+// CHECK-NEXT:    br i1 [[CMP2]], label %[[LOR_END]], label %[[LOR_RHS:.*]]
+// CHECK:       [[LOR_RHS]]:
+// CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[CC]], align 4
+// CHECK-NEXT:    [[CMP3:%.*]] = icmp eq i32 [[TMP5]], 3
+// CHECK-NEXT:    br label %[[LOR_END]]
+// CHECK:       [[LOR_END]]:
+// CHECK-NEXT:    [[TMP6:%.*]] = phi i1 [ true, %[[LOR_LHS_FALSE]] ], [ true, %[[ENTRY]] ], [ [[CMP3]], %[[LOR_RHS]] ]
+// CHECK-NEXT:    [[TMP7:%.*]] = zext i1 [[TMP6]] to i64
+// CHECK-NEXT:    [[COND:%.*]] = select i1 [[TMP6]], i32 42, i32 0
+// CHECK-NEXT:    ret i32 [[COND]]
+//
+int foo_123(int x) {
+  int cc;
+  asm volatile ("ahi %[x],42\n" : [x] "+d"(x), "=@cc" (cc));
+  return cc == 1 || cc == 2 || cc == 3 ? 42 : 0;
+}
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index e0b638201a047..cb136fe2f446b 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -5071,6 +5071,9 @@ class TargetLowering : public TargetLoweringBase {
                                             std::vector<SDValue> &Ops,
                                             SelectionDAG &DAG) const;
 
+  // Return true if the switch condition is a flag output operand SRL/IPM
+  // sequence that the target can lower without creating extra basic blocks.
+  virtual bool canLowerSRL_IPM_Switch(SDValue Cond) const;
+
   // Lower custom output constraints. If invalid, return SDValue().
   virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue,
                                               const SDLoc &DL,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 3b046aa25f544..a32787bc882f1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2831,8 +2831,37 @@ void SelectionDAGBuilder::visitBr(const BranchInst &I) {
       Opcode = Instruction::And;
     else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
       Opcode = Instruction::Or;
-
-    if (Opcode &&
+    auto &TLI = DAG.getTargetLoweringInfo();
+    bool BrSrlIPM = FuncInfo.MF->getTarget().getTargetTriple().getArch() ==
+                    Triple::ArchType::systemz;
+    // For a flag output operand's SRL/IPM sequence, avoid splitting the
+    // condition into switch-like cases: the extra basic blocks inhibit the
+    // DAGCombiner optimization for flag output operands.
+    const auto checkSRLIPM = [&TLI](const SDValue &Op) {
+      if (!Op.getNumOperands())
+        return false;
+      SDValue OpVal = Op.getOperand(0);
+      SDNode *N = OpVal.getNode();
+      if (N && N->getOpcode() == ISD::SRL)
+        return TLI.canLowerSRL_IPM_Switch(OpVal);
+      else if (N && OpVal.getNumOperands() &&
+               (N->getOpcode() == ISD::AND || N->getOpcode() == ISD::OR)) {
+        SDValue OpVal1 = OpVal.getOperand(0);
+        SDNode *N1 = OpVal1.getNode();
+        if (N1 && N1->getOpcode() == ISD::SRL)
+          return TLI.canLowerSRL_IPM_Switch(OpVal1);
+      }
+      return false;
+    };
+    if (BrSrlIPM) {
+      if (NodeMap.count(BOp0) && NodeMap[BOp0].getNode()) {
+        BrSrlIPM &= checkSRLIPM(getValue(BOp0));
+        if (NodeMap.count(BOp1) && NodeMap[BOp1].getNode())
+          BrSrlIPM &= checkSRLIPM(getValue(BOp1));
+      } else
+        BrSrlIPM = false;
+    }
+    if (Opcode && !BrSrlIPM &&
         !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
           match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value()))) &&
         !shouldKeepJumpConditionsTogether(
@@ -12043,18 +12072,41 @@ void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
       const APInt &SmallValue = Small.Low->getValue();
       const APInt &BigValue = Big.Low->getValue();
 
+      // On SystemZ, creating switch cases for flag output operands inhibits
+      // the DAGCombiner, which computes a cumulative CCMask for the SRL/IPM
+      // sequence of a flag output operand. Avoid creating switch cases (and
+      // the extra basic blocks they introduce) for conditions such as
+      // (CC == 0) || (CC == 2) || (CC == 3), or
+      // (CC == 0) || (CC == 1) ^ (CC == 3); there could potentially be more
+      // cases like this.
+      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+      bool IsSrlIPM = false;
+      if (NodeMap.count(Cond) && NodeMap[Cond].getNode())
+        IsSrlIPM = CurMF->getTarget().getTargetTriple().getArch() ==
+                       Triple::ArchType::systemz &&
+                   TLI.canLowerSRL_IPM_Switch(getValue(Cond));
       // Check that there is only one bit different.
       APInt CommonBit = BigValue ^ SmallValue;
-      if (CommonBit.isPowerOf2()) {
+      if (CommonBit.isPowerOf2() || IsSrlIPM) {
         SDValue CondLHS = getValue(Cond);
         EVT VT = CondLHS.getValueType();
         SDLoc DL = getCurSDLoc();
-
-        SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
-                                 DAG.getConstant(CommonBit, DL, VT));
-        SDValue Cond = DAG.getSetCC(
-            DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
-            ISD::SETEQ);
+        SDValue Cond;
+
+        if (CommonBit.isPowerOf2()) {
+          SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
+                                   DAG.getConstant(CommonBit, DL, VT));
+          Cond = DAG.getSetCC(DL, MVT::i1, Or,
+                              DAG.getConstant(BigValue | SmallValue, DL, VT),
+                              ISD::SETEQ);
+        } else if (IsSrlIPM && BigValue == 3 && SmallValue == 0) {
+          SDValue SetCC =
+              DAG.getSetCC(DL, MVT::i32, CondLHS,
+                           DAG.getConstant(SmallValue, DL, VT), ISD::SETEQ);
+          Cond = DAG.getSetCC(DL, MVT::i32, SetCC,
+                              DAG.getConstant(BigValue, DL, VT), ISD::SETEQ);
+        }
 
         // Update successor info.
         // Both Small and Big will jump to Small.BB, so we sum up the
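
To make the "cumulative CCMask" mentioned above concrete, here is a small
standalone sketch (not part of the patch) of the arithmetic the SystemZ
combine relies on: CC value i corresponds to mask bit 1 << (3 - i), so a
disjunction of CC equality tests collapses into a single mask that
BR_CCMASK/SELECT_CCMASK can branch on.

  #include <cassert>

  // Mask bit for a single CC value, mirroring SystemZ::CCMASK_0..CCMASK_3.
  static constexpr unsigned ccMaskFor(unsigned CC) { return 1u << (3 - CC); }

  int main() {
    // (CC == 0) || (CC == 2) || (CC == 3) -> CCMASK_0 | CCMASK_2 | CCMASK_3.
    unsigned Mask = ccMaskFor(0) | ccMaskFor(2) | ccMaskFor(3);
    assert(Mask == 0xB);
    // XOR against CCMASK_ANY (0xF) yields the complementary condition.
    assert((Mask ^ 0xF) == ccMaskFor(1));
    return 0;
  }
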
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 8287565336b54..3d48adac509cb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -5563,6 +5563,10 @@ const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
   return nullptr;
 }
 
+bool TargetLowering::canLowerSRL_IPM_Switch(SDValue Cond) const {
+  return false;
+}
+
 SDValue TargetLowering::LowerAsmOutputForConstraint(
     SDValue &Chain, SDValue &Glue, const SDLoc &DL,
     const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const {
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 3999b54de81b6..259da48a3b223 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -1207,6 +1207,9 @@ SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
     default:
       break;
     }
+  } else if (Constraint.size() == 5 && Constraint.starts_with("{")) {
+    if (StringRef("{@cc}").compare(Constraint) == 0)
+      return C_Other;
   }
   return TargetLowering::getConstraintType(Constraint);
 }
@@ -1389,6 +1392,10 @@ SystemZTargetLowering::getRegForInlineAsmConstraint(
       return parseRegisterNumber(Constraint, &SystemZ::VR128BitRegClass,
                                  SystemZMC::VR128Regs, 32);
     }
+    if (Constraint[1] == '@') {
+      if (StringRef("{@cc}").compare(Constraint) == 0)
+        return std::make_pair(0u, &SystemZ::GR32BitRegClass);
+    }
   }
   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
 }
@@ -1421,6 +1428,35 @@ Register SystemZTargetLowering::getExceptionSelectorRegister(
   return Subtarget.isTargetXPLINK64() ? SystemZ::R2D : SystemZ::R7D;
 }
 
+// Lower @cc targets via setcc.
+SDValue SystemZTargetLowering::LowerAsmOutputForConstraint(
+    SDValue &Chain, SDValue &Glue, const SDLoc &DL,
+    const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const {
+  if (StringRef("{@cc}").compare(OpInfo.ConstraintCode) != 0)
+    return SDValue();
+
+  // Check that return type is valid.
+  if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
+      OpInfo.ConstraintVT.getSizeInBits() < 8)
+    report_fatal_error("Glue output operand is of invalid type");
+
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  MRI.addLiveIn(SystemZ::CC);
+
+  if (Glue.getNode()) {
+    Glue = DAG.getCopyFromReg(Chain, DL, SystemZ::CC, MVT::i32, Glue);
+    Chain = Glue.getValue(1);
+  } else
+    Glue = DAG.getCopyFromReg(Chain, DL, SystemZ::CC, MVT::i32);
+
+  SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
+  SDValue CC = DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
+                           DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
+
+  return CC;
+}
+
 void SystemZTargetLowering::LowerAsmOperandForConstraint(
     SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
     SelectionDAG &DAG) const {
@@ -2485,6 +2521,21 @@ static unsigned CCMaskForCondCode(ISD::CondCode CC) {
 #undef CONV
 }
 
+static unsigned CCMaskForSystemZCCVal(unsigned CC) {
+  switch (CC) {
+  default:
+    llvm_unreachable("invalid integer condition!");
+  case 0:
+    return SystemZ::CCMASK_CMP_EQ;
+  case 1:
+    return SystemZ::CCMASK_CMP_LT;
+  case 2:
+    return SystemZ::CCMASK_CMP_GT;
+  case 3:
+    return SystemZ::CCMASK_CMP_UO;
+  }
+}
+
 // If C can be converted to a comparison against zero, adjust the operands
 // as necessary.
 static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
@@ -7657,6 +7708,413 @@ SDValue SystemZTargetLowering::combineBSWAP(
   return SDValue();
 }
 
+// Combine IPM sequence for flag output operands.
+static bool combineCCIPMMask(SDValue &CCReg, int &CCValid, int &CCMask) {
+  // Convert CCVal to CCMask and update it along with  CCValid.
+  const auto convertCCValToCCMask = [&CCMask, &CCValid](int CCVal) {
+    bool Invert = false;
+    if (CCMask == SystemZ::CCMASK_CMP_NE)
+      Invert = !Invert;
+    if (CCMask == SystemZ::CCMASK_CMP_EQ || CCMask == SystemZ::CCMASK_CMP_NE) {
+      CCMask = CCMaskForSystemZCCVal(CCVal);
+      if (Invert)
+        CCMask ^= SystemZ::CCMASK_ANY;
+      CCValid = SystemZ::CCMASK_ANY;
+      return true;
+    } else if (CCMask == SystemZ::CCMASK_CMP_LT) {
+      // CC in range [0, CCVal).
+      CCMask = ((~0U << (4 - CCVal)) & SystemZ::CCMASK_ANY);
+      CCValid = SystemZ::CCMASK_ANY;
+      return true;
+    } else if (CCMask == SystemZ::CCMASK_CMP_GT) {
+      // CC in range (CCVal, 3].
+      CCMask = (~(~0U << (3 - CCVal))) & SystemZ::CCMASK_ANY;
+      CCValid = SystemZ::CCMASK_ANY;
+      return true;
+    }
+    return false;
+  };
+  // Check (SRL (IPM (CC))) and update CCReg to combine.
+  const auto isSRL_IPM_CCSequence = [&CCReg](SDNode *N) {
+    if (!N || N->getOpcode() != ISD::SRL)
+      return false;
+    auto *SRLCount = dyn_cast<ConstantSDNode>(N->getOperand(1));
+    if (!SRLCount || SRLCount->getZExtValue() != SystemZ::IPM_CC)
+      return false;
+    auto *IPM = N->getOperand(0).getNode();
+    if (!IPM || IPM->getOpcode() != SystemZISD::IPM)
+      return false;
+    auto *IPMOp0 = IPM->getOperand(0).getNode();
+    if (!IPMOp0 || IPMOp0->getNumOperands() < 2)
+      return false;
+    auto *RN = dyn_cast<RegisterSDNode>(IPMOp0->getOperand(1));
+    if (!RN || !RN->getReg().isPhysical() || RN->getReg() != SystemZ::CC)
+      return false;
+    // Return the updated CCReg link.
+    CCReg = IPM->getOperand(0);
+    return true;
+  };
+  // Check if N has SystemZ::CC operand.
+  const auto isCCOperand = [](SDNode *N) {
+    if (!N || N->getNumOperands() < 2)
+      return false;
+    auto *RN = dyn_cast<RegisterSDNode>(N->getOperand(1));
+    if (!RN || !RN->getReg().isPhysical() || RN->getReg() != SystemZ::CC)
+      return false;
+    return true;
+  };
+
+  auto *CCNode = CCReg.getNode();
+  if (!CCNode)
+    return false;
+
+  int RestoreCCValid = CCValid;
+  // Optimize (TM (IPM (CC)))
+  if (CCNode->getOpcode() == SystemZISD::TM) {
+    bool Invert = false;
+    if (CCMask == SystemZ::CCMASK_TM_SOME_1)
+      Invert = !Invert;
+    auto *N = CCNode->getOperand(0).getNode();
+    auto Shift = dyn_cast<ConstantSDNode>(CCNode->getOperand(1));
+    if (!N || !Shift)
+      return false;
+    if (N->getOpcode() == SystemZISD::IPM) {
+      auto ShiftVal = Shift->getZExtValue();
+      if (ShiftVal == (1 << SystemZ::IPM_CC))
+        CCMask = SystemZ::CCMASK_CMP_GE;
+      if (Invert)
+        CCMask ^= CCValid;
+      // Return the updated CCReg link.
+      CCReg = N->getOperand(0);
+      return true;
+    } else if (N->getOpcode() == ISD::XOR) {
+      // Optimize (TM (XOR (OP1 OP2))).
+      auto *XOROp1 = N->getOperand(0).getNode();
+      auto *XOROp2 = N->getOperand(1).getNode();
+      if (!XOROp1 || !XOROp2)
+        return false;
+      // OP1. (SELECT_CCMASK (ICMP (SRL (IPM (CC))))).
+      // OP2. (SRL (IPM (CC))).
+      if (XOROp1->getOpcode() == SystemZISD::SELECT_CCMASK &&
+          isSRL_IPM_CCSequence(XOROp2)) {
+        auto *CCValid1 = dyn_cast<ConstantSDNode>(XOROp1->getOperand(2));
+        auto *CCMask1 = dyn_cast<ConstantSDNode>(XOROp1->getOperand(3));
+        SDValue XORReg = XOROp1->getOperand(4);
+        if (!CCValid1 || !CCMask1)
+          return false;
+        int CCValidVal = CCValid1->getZExtValue();
+        int CCMaskVal = CCMask1->getZExtValue();
+        if (combineCCIPMMask(XORReg, CCValidVal, CCMaskVal)) {
+          // CC == 0 || CC == 2 for bit 28 Test Under Mask.
+          CCMask = SystemZ::CCMASK_CMP_GE;
+          CCMask ^= CCMaskVal;
+          if (Invert)
+            CCMask ^= CCValid;
+          CCReg = XORReg;
+          return true;
+        }
+      }
+    }
+  }
+  // Optimize (AND (SRL (IPM (CC)))).
+  if (CCNode->getOpcode() == ISD::AND) {
+    auto *N = CCNode->getOperand(0).getNode();
+    if (!isSRL_IPM_CCSequence(N))
+      return false;
+    auto *ANDConst = dyn_cast<ConstantSDNode>(CCNode->getOperand(1));
+    if (!ANDConst)
+      return false;
+    // Bit 28 false (CC == 0) || (CC == 2).
+    // Caller can invert it depending on CCmask there.
+    if (ANDConst->getZExtValue() == 1) {
+      CCMask = SystemZ::CCMASK_0 | SystemZ::CCMASK_2;
+      CCValid = SystemZ::CCMASK_ANY;
+      return true;
+    }
+    CCValid = RestoreCCValid;
+    return false;
+  }
+  // (SELECT_CCMASK (CC)) or (SELECT_CCMASK (ICMP (SRL (IPM (CC)))))
+  if (CCNode->getOpcode() == SystemZISD::SELECT_CCMASK) {
+    auto *CCValidNode = dyn_cast<ConstantSDNode>(CCNode->getOperand(2));
+    auto *CCMaskNode = dyn_cast<ConstantSDNode>(CCNode->getOperand(3));
+    if (!CCValidNode || !CCMaskNode)
+      return false;
+
+    int CCValidVal = CCValidNode->getZExtValue();
+    int CCMaskVal = CCMaskNode->getZExtValue();
+    SDValue CCRegOp = CCNode->getOperand(4);
+    if (combineCCIPMMask(CCRegOp, CCValidVal, CCMaskVal) ||
+        isCCOperand(CCRegOp.getNode())) {
+      CCMask = CCMaskVal;
+      CCValid = SystemZ::CCMASK_ANY;
+      CCReg = CCRegOp;
+      return true;
+    }
+    CCValid = RestoreCCValid;
+    return false;
+  }
+
+  // Both operands of the XOR are (SELECT_CCMASK (ICMP (SRL (IPM (CC))))).
+  if (CCNode->getOpcode() == ISD::XOR) {
+    if (isa<ConstantSDNode>(CCNode->getOperand(0)) ||
+        isa<ConstantSDNode>(CCNode->getOperand(1)))
+      return false;
+    auto *XOROp1 = CCNode->getOperand(0).getNode();
+    auto *XOROp2 = CCNode->getOperand(1).getNode();
+    if (!XOROp1 || !XOROp2)
+      return false;
+    // Both Operands are select_cc.
+    if (XOROp1->getOpcode() == SystemZISD::SELECT_CCMASK &&
+        XOROp2->getOpcode() == SystemZISD::SELECT_CCMASK) {
+      auto *CCValid1 = dyn_cast<ConstantSDNode>(XOROp1->getOperand(2));
+      auto *CCMask1 = dyn_cast<ConstantSDNode>(XOROp1->getOperand(3));
+      auto *CCValid2 = dyn_cast<ConstantSDNode>(XOROp2->getOperand(2));
+      auto *CCMask2 = dyn_cast<ConstantSDNode>(XOROp2->getOperand(3));
+      if (!CCValid1 || !CCMask1 || !CCValid2 || !CCMask2)
+        return false;
+      int CCValidVal1 = CCValid1->getZExtValue();
+      int CCMaskVal1 = CCMask1->getZExtValue();
+      int CCValidVal2 = CCValid2->getZExtValue();
+      int CCMaskVal2 = CCMask2->getZExtValue();
+      SDValue CCReg1 = XOROp1->getOperand(4);
+      SDValue CCReg2 = XOROp2->getOperand(4);
+      if (!combineCCIPMMask(CCReg1, CCValidVal1, CCMaskVal1) ||
+          !combineCCIPMMask(CCReg2, CCValidVal2, CCMaskVal2))
+        return false;
+      CCMask = CCMaskVal1 ^ CCMaskVal2;
+      CCReg = CCReg1;
+      CCValid = SystemZ::CCMASK_ANY;
+      return true;
+    }
+    CCValid = RestoreCCValid;
+    return false;
+  }
+
+  // The rest of the code handles ICMP cases.
+  // Handle the case (ICMP (OP (SRL (IPM (CC))))).
+  if (!CCNode || CCNode->getOpcode() != SystemZISD::ICMP)
+    return false;
+  auto *LHS = CCNode->getOperand(0).getNode();
+  auto *RHS = dyn_cast<ConstantSDNode>(CCNode->getOperand(1));
+  if (!LHS || LHS->getOpcode() == ISD::Constant)
+    return false;
+
+  // (BR_CC (ICMP (Op1 Op2))), where Op1 and Op2 each contain an
+  // (SRL (IPM (CC))) sequence. The second operand of SystemZISD::ICMP is not
+  // a constant here.
+  if (!RHS) {
+    SDValue CmpOp1 = CCNode->getOperand(0);
+    SDValue CmpOp2 = CCNode->getOperand(1);
+    int CCValid1 = CCValid, CCValid2 = CCValid;
+    int CCMask1 = CCMask, CCMask2 = CCMask;
+    bool IsOp1 = combineCCIPMMask(CmpOp1, CCValid1, CCMask1);
+    bool IsOp2 = combineCCIPMMask(CmpOp2, CCValid2, CCMask2);
+    if (IsOp1 && IsOp2) {
+      CCMask = CCMask1 ^ CCMask2;
+      CCReg = CmpOp1;
+      CCValid = SystemZ::CCMASK_ANY;
+      return true;
+    }
+    CCValid = RestoreCCValid;
+    return false;
+  }
+  int CmpVal = RHS->getZExtValue();
+  // (BR_CC (ICMP (SELECT_CCMASK (CC))))
+  if (LHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
+    int CCVal = RHS->getZExtValue();
+    int Mask = CCMaskForSystemZCCVal(CCVal);
+    bool Invert = false;
+    if (CCMask == SystemZ::CCMASK_CMP_NE)
+      Invert = !Invert;
+    SDValue NewCCReg = CCNode->getOperand(0);
+    if (combineCCIPMMask(NewCCReg, CCValid, CCMask)) {
+      CCMask |= Mask;
+      if (Invert)
+        CCMask ^= SystemZ::CCMASK_ANY;
+      CCReg = NewCCReg;
+      CCValid = SystemZ::CCMASK_ANY;
+      return true;
+    }
+    CCValid = RestoreCCValid;
+    return false;
+  }
+  // (BR_CC (ICMP OR ((SRL (IPM (CC))) (SELECT_CCMASK (CC)))))
+  if (LHS->getOpcode() == ISD::OR) {
+    bool Invert = false;
+    if (CCMask == SystemZ::CCMASK_CMP_NE)
+      Invert = !Invert;
+    SDValue OrOp1 = LHS->getOperand(0);
+    SDValue OrOp2 = LHS->getOperand(1);
+    int NewCCMask1 = CCMask, NewCCMask2 = CCMask, NewCCMask = CCMask;
+    if (!isa<ConstantSDNode>(OrOp1) && !isa<ConstantSDNode>(OrOp2)) {
+      bool IsOp1 = combineCCIPMMask(OrOp1, CCValid, NewCCMask1);
+      bool IsOp2 = combineCCIPMMask(OrOp2, CCValid, NewCCMask2);
+      if (!IsOp1 && !IsOp2) {
+        CCValid = RestoreCCValid;
+        return false;
+      }
+      if (IsOp1 && IsOp2) {
+        NewCCMask = NewCCMask1 | NewCCMask2;
+        bool IsEqualCmpVal = NewCCMask == CmpVal;
+        if ((CCMask == SystemZ::CCMASK_CMP_NE && IsEqualCmpVal) ||
+            (CCMask == SystemZ::CCMASK_CMP_EQ && !IsEqualCmpVal))
+          NewCCMask ^= SystemZ::CCMASK_ANY;
+        CCReg = OrOp1;
+        CCMask = NewCCMask;
+        CCValid = SystemZ::CCMASK_ANY;
+        return true;
+      }
+    } else if (isa<ConstantSDNode>(OrOp2)) {
+      if (isSRL_IPM_CCSequence(OrOp1.getNode())) {
+        auto *OrConst = cast<ConstantSDNode>(OrOp2);
+        int OrConstVal = OrConst->getZExtValue();
+        if (OrConstVal & 0x3)
+          return false;
+        // setult unsigned(-2): mask = 0b1100
+        // setugt unsigned(-4): mask = 0b0011
+        CmpVal &= 0x3;
+        if (convertCCValToCCMask(CmpVal))
+          return true;
+      }
+    }
+    CCValid = RestoreCCValid;
+    return false;
+  }
+  // (BR_CC (ICMP AND ((SRL (IPM (CC))) (SELECT_CCMASK (CC)))))
+  if (LHS->getOpcode() == ISD::AND) {
+    bool Invert = false;
+    if (CCMask == SystemZ::CCMASK_CMP_NE)
+      Invert = !Invert;
+    SDValue AndOp1 = LHS->getOperand(0);
+    SDValue AndOp2 = LHS->getOperand(1);
+    int NewCCMask1 = CCMask;
+    int NewCCMask2 = CCMask;
+    int NewCCMask = CCMask;
+    if (!isa<ConstantSDNode>(AndOp1) && !isa<ConstantSDNode>(AndOp2)) {
+      bool IsOp1 = combineCCIPMMask(AndOp1, CCValid, NewCCMask1);
+      bool IsOp2 = combineCCIPMMask(AndOp2, CCValid, NewCCMask2);
+      if (!IsOp1 && !IsOp2) {
+        CCValid = RestoreCCValid;
+        return false;
+      }
+      if (IsOp1 && IsOp2) {
+        NewCCMask = NewCCMask1 & NewCCMask2;
+        bool IsEqualCmpVal = NewCCMask == CmpVal;
+        if ((CCMask == SystemZ::CCMASK_CMP_NE && IsEqualCmpVal) ||
+            (CCMask == SystemZ::CCMASK_CMP_EQ && !IsEqualCmpVal))
+          NewCCMask ^= SystemZ::CCMASK_ANY;
+        CCMask = NewCCMask;
+        CCReg = AndOp1;
+        CCValid = SystemZ::CCMASK_ANY;
+        return true;
+      } else {
+        if (IsOp1 && isSRL_IPM_CCSequence(AndOp2.getNode()))
+          NewCCMask = NewCCMask1;
+        else if (isSRL_IPM_CCSequence(AndOp2.getNode()) && IsOp2)
+          NewCCMask = NewCCMask2;
+        // Bit 29 set => CC == 2 || CC == 3.
+        if ((NewCCMask & 0x3) == 2)
+          NewCCMask = SystemZ::CCMASK_2 | SystemZ::CCMASK_3;
+        // Bit 28 set => CC == 1 || CC == 3.
+        else if ((NewCCMask & 0x3) == 1)
+          NewCCMask = SystemZ::CCMASK_1 | SystemZ::CCMASK_3;
+        int CCVal = RHS->getZExtValue();
+        int Mask = CCMaskForSystemZCCVal(CCVal);
+        CCMask = Mask | NewCCMask;
+        if (Invert ^ CmpVal)
+          CCMask ^= SystemZ::CCMASK_ANY;
+        CCValid = SystemZ::CCMASK_ANY;
+        return true;
+      }
+    }
+    CCValid = RestoreCCValid;
+    return false;
+  }
+  // Optimize the case (ICMP (SRL (IPM (CC))) Const), i.e. LHS is the
+  // SRL/IPM sequence itself.
+  if (isSRL_IPM_CCSequence(LHS)) {
+    unsigned CCVal = RHS->getZExtValue();
+    if (convertCCValToCCMask(CCVal))
+      return true;
+    CCValid = RestoreCCValid;
+    return false;
+  }
+  if (LHS->getOpcode() == ISD::ADD) {
+    if (isSRL_IPM_CCSequence(LHS->getOperand(0).getNode())) {
+      int CCVal = RHS->getZExtValue();
+      // (unsigned) CCVal - 1 or (unsigned) CCVal - 3 Inverted.
+      // CCMask == SystemZ::CCMASK_CMP_LT, CCVal <= 2 => CC == 1 || CC == 2.
+      // CCMask == SystemZ::CCMASK_CMP_LT and CCVal <= 3 =>
+      // CC == 1 || CC == 2 || CC == 3.
+      auto *AddConstOp = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
+      if (!AddConstOp)
+        return false;
+      int AddConst = AddConstOp->getZExtValue();
+      bool Invert = false;
+      if (CCVal < 0) {
+        Invert = !Invert;
+        // setult unsigned(-2), AddConst == -3.
+        AddConst = AddConst & 0x3;
+      } else
+        AddConst = ~AddConst + 1;
+      // The original CCMask of the SELECT_CCMASK/BR_CCMASK does not have
+      // <= or >=.
+      CCVal &= 0x3;
+      CCVal += AddConst;
+      if (convertCCValToCCMask(CCVal)) {
+        // CCVal cannot be zero here.
+        CCMask ^= SystemZ::CCMASK_CMP_EQ;
+        if (Invert)
+          CCMask ^= SystemZ::CCMASK_ANY;
+        return true;
+      }
+    }
+    CCValid = RestoreCCValid;
+    return false;
+  }
+
+  // Optimize (ICMP (XOR (OP1 OP2))); OP1 or OP2 could itself be an XOR.
+  // One or both operands could be (SELECT_CCMASK (ICMP (SRL (IPM (CC))))).
+  if (LHS->getOpcode() == ISD::XOR) {
+    SDValue XORReg = CCReg->getOperand(0);
+    bool Invert = false;
+    if (CCMask == SystemZ::CCMASK_CMP_NE)
+      Invert = !Invert;
+    // If both the operands are select_cc.
+    if (combineCCIPMMask(XORReg, CCValid, CCMask)) {
+      CCReg = XORReg;
+      CCValid = SystemZ::CCMASK_ANY;
+      return true;
+    }
+    // Handle the case where one operand is a select_cc and the other operand
+    // could again be an xor with both of its operands being select_cc.
+    auto *XOROp1 = LHS->getOperand(0).getNode();
+    auto *XOROp2 = LHS->getOperand(1).getNode();
+    if (!XOROp1 || !XOROp2)
+      return false;
+    if (XOROp1->getOpcode() == SystemZISD::SELECT_CCMASK ||
+        XOROp2->getOpcode() == SystemZISD::SELECT_CCMASK) {
+      auto *XOROp =
+          XOROp1->getOpcode() == SystemZISD::SELECT_CCMASK ? XOROp1 : XOROp2;
+      auto *CCMaskNode = dyn_cast<ConstantSDNode>(XOROp->getOperand(3));
+      auto *CCValidNode = dyn_cast<ConstantSDNode>(XOROp->getOperand(2));
+      if (!CCValidNode || !CCMaskNode)
+        return false;
+      int CCValidVal = CCValidNode->getZExtValue();
+      int CCMaskVal = CCMaskNode->getZExtValue();
+      SDValue XORReg1 = XOROp->getOperand(4);
+      SDValue XORReg2 = LHS->getOperand(1);
+      int CCMaskVal1 = CCMaskVal, CCMaskVal2 = CCMaskVal;
+      if (combineCCIPMMask(XORReg1, CCValidVal, CCMaskVal1) &&
+          combineCCIPMMask(XORReg2, CCValidVal, CCMaskVal2)) {
+        CCMask = CCMaskVal1 ^ CCMaskVal2;
+        CCReg = XORReg1;
+        CCValid = SystemZ::CCMASK_ANY;
+        return true;
+      }
+    }
+  }
+  CCValid = RestoreCCValid;
+  return false;
+}
+
 static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
   // We have a SELECT_CCMASK or BR_CCMASK comparing the condition code
   // set by the CCReg instruction using the CCValid / CCMask masks,
@@ -7744,6 +8202,134 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
   return false;
 }
 
+std::optional<SDValue>
+SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
+                                                  DAGCombinerInfo &DCI) const {
+  SelectionDAG &DAG = DCI.DAG;
+  // Check if N has SystemZ::CC operand.
+  const auto isCCOperand = [](SDNode *N) {
+    if (!N || N->getNumOperands() < 2)
+      return false;
+    auto *RN = dyn_cast<RegisterSDNode>(N->getOperand(1));
+    if (!RN || !RN->getReg().isPhysical() || RN->getReg() != SystemZ::CC)
+      return false;
+    return true;
+  };
+
+  auto *TrueVal = dyn_cast<ConstantSDNode>(N->getOperand(0));
+  auto *FalseVal = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  // The case where both operands are constant is already handled in
+  // combineCCMask. The case where neither operand is constant has not been
+  // encountered yet; it could be handled by removing this condition.
+  if (!((TrueVal != nullptr) ^ (FalseVal != nullptr)))
+    return std::nullopt;
+
+  SDValue CCOp = TrueVal ? N->getOperand(1) : N->getOperand(0);
+  auto *CCOpNode = CCOp.getNode();
+  if (!CCOpNode || CCOpNode->getOpcode() != SystemZISD::SELECT_CCMASK)
+    return std::nullopt;
+
+  auto *TrueValOp = dyn_cast<ConstantSDNode>(CCOpNode->getOperand(0));
+  auto *FalseValOp = dyn_cast<ConstantSDNode>(CCOpNode->getOperand(1));
+  bool InvertOp1 = false, InvertOp2 = false;
+  // Check whether the True/False values of the outer and inner select_cc
+  // match or are inverted.
+  if (TrueVal) {
+    if (FalseValOp && TrueVal->getZExtValue() == FalseValOp->getZExtValue())
+      InvertOp2 = !InvertOp2;
+    else if (!TrueValOp || TrueVal->getZExtValue() != TrueValOp->getZExtValue())
+      return std::nullopt;
+  } else if (FalseVal) {
+    if (TrueValOp && FalseVal->getZExtValue() == TrueValOp->getZExtValue())
+      InvertOp1 = !InvertOp1;
+    else if (!FalseValOp ||
+             FalseVal->getZExtValue() != FalseValOp->getZExtValue())
+      return std::nullopt;
+  }
+
+  auto *CCValidNode = dyn_cast<ConstantSDNode>(N->getOperand(2));
+  auto *CCMaskNode = dyn_cast<ConstantSDNode>(N->getOperand(3));
+  auto *CCValidOp = dyn_cast<ConstantSDNode>(CCOpNode->getOperand(2));
+  auto *CCMaskOp = dyn_cast<ConstantSDNode>(CCOpNode->getOperand(3));
+  if (!CCValidNode || !CCMaskNode || !CCMaskOp || !CCValidOp)
+    return std::nullopt;
+
+  int CCValid = CCValidNode->getZExtValue();
+  int CCMaskValOp = CCMaskOp->getZExtValue();
+  int CCValidValOp = CCValidOp->getZExtValue();
+  int CCMask = CCMaskNode->getZExtValue();
+  bool IsUnionMask = CCMask == SystemZ::CCMASK_CMP_EQ;
+  if (CCValid != SystemZ::CCMASK_ICMP)
+    return std::nullopt;
+
+  SDValue CCReg = N->getOperand(4);
+  SDValue CCRegOp = CCOpNode->getOperand(4);
+  // Combine current select_cc.
+  if (combineCCIPMMask(CCReg, CCValid, CCMask)) {
+    if (InvertOp1)
+      CCMask ^= SystemZ::CCMASK_ANY;
+    // There are two scenarios here.
+    // Case 1. The inner (ICMP (SELECT_CCMASK)) has not yet been combined into
+    // a SELECT_CCMASK; compute CCMask after the optimization.
+    // Case 2. The inner (ICMP (SELECT_CCMASK)) has already been combined into
+    // a SELECT_CCMASK; check isCCOperand. In this case the original CCMask is
+    // unknown, but if only one bit is set in CCMaskValOp, the original CCMask
+    // was SystemZ::CCMASK_CMP_EQ.
+    if (!combineCCIPMMask(CCRegOp, CCValidValOp, CCMaskValOp) &&
+        !isCCOperand(CCRegOp.getNode()))
+      return std::nullopt;
+    // True if the outer SELECT_CCMASK is CCMASK_CMP_EQ or only a single bit
+    // is set in CCMaskValOp (i.e. the inner SELECT_CCMASK was CCMASK_CMP_EQ).
+    bool OnlyOneBitSet = CCMaskValOp && !(CCMaskValOp & (CCMaskValOp - 1));
+    // Either the original CCMask of the current SELECT_CCMASK is
+    // SystemZ::CCMASK_CMP_EQ, or the original CCMask of the inner
+    // SELECT_CCMASK (before the CCMask computation) was SystemZ::CCMASK_CMP_EQ.
+    IsUnionMask =
+        IsUnionMask || CCMaskValOp == SystemZ::CCMASK_CMP_EQ || OnlyOneBitSet;
+    if (InvertOp2)
+      CCMaskValOp ^= SystemZ::CCMASK_ANY;
+    if (IsUnionMask)
+      CCMask |= CCMaskValOp;
+    // Otherwise the original outer SELECT_CCMASK has a CCMask of
+    // SystemZ::CCMASK_CMP_LT, SystemZ::CCMASK_CMP_GT, or
+    // SystemZ::CCMASK_CMP_NE, and the inner CCMaskValOp is also not
+    // SystemZ::CCMASK_CMP_EQ, so take the intersection. When both the outer
+    // and inner masks are SystemZ::CCMASK_CMP_NE, !(!a || !b) => (a && b).
+    else
+      CCMask &= CCMaskValOp;
+    auto Op0 = CCOpNode->getOperand(0);
+    auto Op1 = CCOpNode->getOperand(1);
+    // Inner select_cc True/False is inverted w.r.t outer. We are using inner
+    // select_cc to get CCRegOp and CCOpNode.
+    if (InvertOp2)
+      std::swap(Op0, Op1);
+    return DAG.getNode(
+        SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0), Op0, Op1,
+        DAG.getTargetConstant(CCValid, SDLoc(N), MVT::i32),
+        DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32), CCRegOp);
+  }
+  return std::nullopt;
+}
+
+bool SystemZTargetLowering::canLowerSRL_IPM_Switch(SDValue Cond) const {
+  auto *SRL = Cond.getNode();
+  if (!SRL || SRL->getOpcode() != ISD::SRL)
+    return false;
+  auto *SRLCount = dyn_cast<ConstantSDNode>(SRL->getOperand(1));
+  if (!SRLCount || SRLCount->getZExtValue() != SystemZ::IPM_CC)
+    return false;
+  auto *IPM = SRL->getOperand(0).getNode();
+  if (!IPM || IPM->getOpcode() != SystemZISD::IPM)
+    return false;
+  auto IPMOp0 = IPM->getOperand(0).getNode();
+  if (!IPMOp0 || IPMOp0->getNumOperands() < 2)
+    return false;
+  auto RN = dyn_cast<RegisterSDNode>(IPMOp0->getOperand(1));
+  if (!RN || !RN->getReg().isPhysical() || RN->getReg() != SystemZ::CC)
+    return false;
+  return true;
+}
+
 SDValue SystemZTargetLowering::combineBR_CCMASK(
     SDNode *N, DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -7759,7 +8345,9 @@ SDValue SystemZTargetLowering::combineBR_CCMASK(
   SDValue Chain = N->getOperand(0);
   SDValue CCReg = N->getOperand(4);
 
-  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
+  // combineCCIPMMask tries to combine the SRL/IPM sequence for a flag output
+  // operand.
+  if (combineCCIPMMask(CCReg, CCValidVal, CCMaskVal) ||
+      combineCCMask(CCReg, CCValidVal, CCMaskVal))
     return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0),
                        Chain,
                        DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32),
@@ -7770,6 +8358,12 @@ SDValue SystemZTargetLowering::combineBR_CCMASK(
 
 SDValue SystemZTargetLowering::combineSELECT_CCMASK(
     SDNode *N, DAGCombinerInfo &DCI) const {
+  // Try to combine a select_cc with an inner select_cc for a flag output
+  // operand. The select_cc may have the inner select_cc as one of its
+  // True/False operands, with the other operand being a constant.
+  std::optional<SDValue> Res = combineSELECT_CC_CCIPMMask(N, DCI);
+  if (Res.has_value())
+    return Res.value();
+
   SelectionDAG &DAG = DCI.DAG;
 
   // Combine SELECT_CCMASK (ICMP (SELECT_CCMASK)) into a single SELECT_CCMASK.
@@ -7782,7 +8376,9 @@ SDValue SystemZTargetLowering::combineSELECT_CCMASK(
   int CCMaskVal = CCMask->getZExtValue();
   SDValue CCReg = N->getOperand(4);
 
-  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
+  // combineCCIPMMask tries to combine the SRL/IPM sequence for a flag output
+  // operand.
+  if (combineCCIPMMask(CCReg, CCValidVal, CCMaskVal) ||
+      combineCCMask(CCReg, CCValidVal, CCMaskVal))
     return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
                        N->getOperand(0), N->getOperand(1),
                        DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32),
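
A related standalone sketch (not part of the patch) of the range handling in
convertCCValToCCMask above: an unsigned CC < N or CC > N comparison on the
extracted CC value maps to a contiguous run of CC-mask bits.

  #include <cassert>

  // CC < N keeps the masks for CC values 0..N-1 (high bits of the 4-bit mask).
  static unsigned maskForCCLess(unsigned N) { return (~0u << (4 - N)) & 0xF; }
  // CC > N keeps the masks for CC values N+1..3 (low bits of the 4-bit mask).
  static unsigned maskForCCGreater(unsigned N) { return ~(~0u << (3 - N)) & 0xF; }

  int main() {
    assert(maskForCCLess(2) == 0xC);    // CC < 2 -> CCMASK_0 | CCMASK_1
    assert(maskForCCGreater(1) == 0x3); // CC > 1 -> CCMASK_2 | CCMASK_3
    assert(maskForCCGreater(0) == 0x7); // CC > 0 -> CCMASK_1|CCMASK_2|CCMASK_3
    return 0;
  }
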
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index 3c06c1fdf2b1b..0bf103a5ceae5 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -510,6 +510,16 @@ class SystemZTargetLowering : public TargetLowering {
   bool shouldExpandCmpUsingSelects(EVT VT) const override { return true; }
 
   const char *getTargetNodeName(unsigned Opcode) const override;
+
+  // Return true if a flag output operand condition is an SRL/IPM sequence.
+  bool canLowerSRL_IPM_Switch(SDValue Cond) const override;
+
+  // Handle Lowering flag assembly outputs.
+  SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag,
+                                      const SDLoc &DL,
+                                      const AsmOperandInfo &Constraint,
+                                      SelectionDAG &DAG) const override;
+
   std::pair<unsigned, const TargetRegisterClass *>
   getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                StringRef Constraint, MVT VT) const override;
@@ -744,7 +754,11 @@ class SystemZTargetLowering : public TargetLowering {
   SDValue combineINT_TO_FP(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineBSWAP(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineBR_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
+  std::optional<SDValue> combineBR_CCJoinIPMMask(SDNode *N,
+                                                 DAGCombinerInfo &DCI) const;
   SDValue combineSELECT_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
+  std::optional<SDValue> combineSELECT_CC_CCIPMMask(SDNode *N,
+                                                    DAGCombinerInfo &DCI) const;
   SDValue combineGET_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineIntDIVREM(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineINTRINSIC(SDNode *N, DAGCombinerInfo &DCI) const;
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccand.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccand.ll
new file mode 100644
index 0000000000000..a816b560e99e4
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccand.ll
@@ -0,0 +1,500 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; Test flag output operands with 14 combinations of CCMASK and optimizations
+; for AND across three different functions, including two test cases from Heiko.
+
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
+
+; Test CC == 0 && CC == 1.
+define signext i32 @foo_01(i32 noundef signext %x) {
+; CHECK-LABEL: foo_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+declare void @llvm.assume(i1 noundef) #1
+
+; Test CC == 0 && CC == 2.
+define signext i32 @foo_02(i32 noundef signext %x) {
+; CHECK-LABEL: foo_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test CC == 0 && CC == 3.
+define signext i32 @foo_03(i32 noundef signext %x) {
+; CHECK-LABEL: foo_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test CC == 1 && CC == 2.
+define signext i32 @foo_12(i32 noundef signext %x) {
+; CHECK-LABEL: foo_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test CC == 1 && CC == 3.
+define signext i32 @foo_13(i32 noundef signext %x) {
+; CHECK-LABEL: foo_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test CC == 2 && CC == 3.
+define signext i32 @foo_23(i32 noundef signext %x) {
+; CHECK-LABEL: foo_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test CC == 0 && CC == 1 && CC == 2.
+define signext i32 @foo_012(i32 noundef signext %x) {
+; CHECK-LABEL: foo_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test CC == 0 && CC == 1 && CC == 3.
+define signext i32 @foo_013(i32 noundef signext %x) {
+; CHECK-LABEL: foo_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test CC == 0 && CC == 2 && CC == 3.
+define signext i32 @foo_023(i32 noundef signext %x) {
+; CHECK-LABEL: foo_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test CC == 1 && CC == 2 && CC == 3.
+define signext i32 @foo_123(i32 noundef signext %x) {
+; CHECK-LABEL: foo_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+
+@a = global i32 0, align 4
+
+; Test CC == 0 && CC == 1.
+define i64 @fu_01() {
+; CHECK-LABEL: fu_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test CC == 0 && CC == 2.
+define i64 @fu_02() {
+; CHECK-LABEL: fu_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test CC == 0 && CC == 3.
+define i64 @fu_03() {
+; CHECK-LABEL: fu_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test CC == 1 && CC == 2.
+define i64 @fu_12() {
+; CHECK-LABEL: fu_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test CC == 1 && CC == 3.
+define i64 @fu_13() {
+; CHECK-LABEL: fu_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test CC == 2 && CC == 3.
+define i64 @fu_23() {
+; CHECK-LABEL: fu_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test CC == 0 && CC == 1 && CC == 2.
+define i64 @fu_012() {
+; CHECK-LABEL: fu_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test CC == 0 && CC == 1 && CC == 3.
+define i64 @fu_013() {
+; CHECK-LABEL: fu_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test CC == 0 && CC == 2 && CC == 3.
+define i64 @fu_023() {
+; CHECK-LABEL: fu_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test CC == 1 && CC == 2 && CC == 3.
+define i64 @fu_123() {
+; CHECK-LABEL: fu_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test CC == 0 && CC == 1.
+define void @bar_01() {
+; CHECK-LABEL: bar_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret void
+}
+
+; Test CC == 0 && CC == 2.
+define void @bar_02() {
+; CHECK-LABEL: bar_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret void
+}
+
+; Test CC == 0 && CC == 3.
+define void @bar_03() {
+; CHECK-LABEL: bar_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret void
+}
+
+; Test CC == 1 && CC == 2.
+define void @bar_12() {
+; CHECK-LABEL: bar_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret void
+}
+
+; Test CC == 1 && CC == 3.
+define void @bar_13() {
+; CHECK-LABEL: bar_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret void
+}
+
+; Test CC == 2 && CC == 3.
+define void @bar_23() {
+; CHECK-LABEL: bar_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret void
+}
+
+; Test CC == 0 && CC == 1 && CC == 2.
+define void @bar_012() {
+; CHECK-LABEL: bar_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret void
+}
+
+; Test CC == 0 && CC == 1 && CC == 3.
+define void @bar_013() {
+; CHECK-LABEL: bar_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret void
+}
+
+; Test CC == 0 && CC == 2 && CC == 3.
+define void @bar_023() {
+; CHECK-LABEL: bar_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret void
+}
+
+; Test CC == 1 && CC == 2 && CC == 3.
+define void @bar_123() {
+; CHECK-LABEL: bar_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret void
+}
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccand_eq_noteq.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccand_eq_noteq.ll
new file mode 100644
index 0000000000000..c9c7e7c5ed418
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccand_eq_noteq.ll
@@ -0,0 +1,939 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; Test Flag Output Operands with 14 combinations of CCMASK and optimizations
+; for AND across three different functions, including two test cases from Heiko.
+; This test checks combinations of the EQUAL (==) and NOT EQUAL (!=) operators,
+; e.g. CC == 0 && CC != 1 && CC != 2 and CC == 0 && CC == 2 && CC != 3.
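+;
+; As an illustrative sketch (not part of this patch), the C-level source for
+; the "foo" style tests below would combine the new "=@cc" flag output with
+; plain comparisons on the returned condition code, roughly:
+;
+;   int foo(int x) {
+;     int cc;
+;     asm("ahi %0,42" : "=d"(x), "=@cc"(cc) : "0"(x));
+;     return (cc == 0 && cc != 1) ? 42 : 0;
+;   }
+;
+; The tests assume CC is in the range 0-3, which is why each one carries an
+; llvm.assume of "cc < 4".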
+
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
+
+; Test CC == 0 && CC != 1.
+define signext range(i32 0, 43) i32 @foo_01(i32 noundef signext %x) {
+; CHECK-LABEL: foo_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB0_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %cond = select i1 %cmp, i32 42, i32 0
+  ret i32 %cond
+}
+
+declare void @llvm.assume(i1 noundef) #1
+
+; Test CC == 0 && CC != 2
+define signext range(i32 0, 43) i32 @foo_02(i32 noundef signext %x) {
+; CHECK-LABEL: foo_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB1_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %cond = select i1 %cmp, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 0 && CC != 3.
+define signext range(i32 0, 43) i32 @foo_03(i32 noundef signext %x) {
+; CHECK-LABEL: foo_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB2_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %cond = select i1 %cmp, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 1 && CC != 2
+define signext range(i32 0, 43) i32 @foo_12(i32 noundef signext %x) {
+; CHECK-LABEL: foo_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB3_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 1
+  %cond = select i1 %cmp, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 1 && CC != 3
+define signext range(i32 0, 43) i32 @foo_13(i32 noundef signext %x) {
+; CHECK-LABEL: foo_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB4_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 1
+  %cond = select i1 %cmp, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 2 && CC != 3.
+define signext range(i32 0, 43) i32 @foo_23(i32 noundef signext %x) {
+; CHECK-LABEL: foo_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB5_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 2
+  %cond = select i1 %cmp, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 0 && CC != 1 && CC != 2.
+define signext range(i32 0, 43) i32 @foo_012(i32 noundef signext %x) {
+; CHECK-LABEL: foo_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB6_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %spec.select = select i1 %cmp, i32 42, i32 0
+  ret i32 %spec.select
+}
+
+; Test CC == 0 && CC != 1 && CC != 3.
+define signext range(i32 0, 43) i32 @foo_013(i32 noundef signext %x) {
+; CHECK-LABEL: foo_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB7_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %spec.select = select i1 %cmp, i32 42, i32 0
+  ret i32 %spec.select
+}
+
+; Test CC == 0 && CC != 2 && CC != 3.
+define signext range(i32 0, 43) i32 @foo_023(i32 noundef signext %x) {
+; CHECK-LABEL: foo_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB8_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %spec.select = select i1 %cmp, i32 42, i32 0
+  ret i32 %spec.select
+}
+
+; Test CC == 1 && CC != 2 && CC != 3.
+define signext range(i32 0, 43) i32 @foo_123(i32 noundef signext %x) {
+; CHECK-LABEL: foo_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB9_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 1
+  %spec.select = select i1 %cmp, i32 42, i32 0
+  ret i32 %spec.select
+}
+
+; Test CC == 0 && CC == 1 && CC != 2.
+define noundef signext i32 @foo1_012(i32 noundef signext %x) {
+; CHECK-LABEL: foo1_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test CC == 0 && CC == 1 && CC != 3.
+define noundef signext i32 @foo1_013(i32 noundef signext %x) {
+; CHECK-LABEL: foo1_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test CC == 0 && CC == 2 && CC != 3
+define noundef signext i32 @foo1_023(i32 noundef signext %x) {
+; CHECK-LABEL: foo1_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test CC == 1 && CC == 2 && CC != 3.
+define noundef signext i32 @foo1_123(i32 noundef signext %x) {
+; CHECK-LABEL: foo1_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+ at a = global i32 0, align 4
+
+; Test CC == 0 && CC != 1.
+define range(i64 5, 9) i64 @fu_01() {
+; CHECK-LABEL: fu_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB14_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 0 && CC != 2.
+define range(i64 5, 9) i64 @fu_02() {
+; CHECK-LABEL: fu_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB15_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 0 && CC != 3.
+define range(i64 5, 9) i64 @fu_03() {
+; CHECK-LABEL: fu_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB16_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 1 && CC != 2.
+define range(i64 5, 9) i64 @fu_12() {
+; CHECK-LABEL: fu_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB17_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 1 && CC != 3.
+define range(i64 5, 9) i64 @fu_13() {
+; CHECK-LABEL: fu_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB18_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 2 && CC != 3.
+define range(i64 5, 9) i64 @fu_23() {
+; CHECK-LABEL: fu_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB19_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 2
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 0 && CC != 1 && CC != 2.
+define range(i64 5, 9) i64 @fu_012() {
+; CHECK-LABEL: fu_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB20_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 0 && CC != 1 && CC != 3.
+define range(i64 5, 9) i64 @fu_013() {
+; CHECK-LABEL: fu_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB21_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 0 && CC != 2 && CC != 3.
+define range(i64 5, 9) i64 @fu_023() {
+; CHECK-LABEL: fu_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB22_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 1 && CC != 2 && CC != 3.
+define range(i64 5, 9) i64 @fu_123() {
+; CHECK-LABEL: fu_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB23_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+; Test CC == 0 && CC != 1.
+define void @bar_01() {
+; CHECK-LABEL: bar_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB24_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 && CC == 1 && CC != 2.
+define noundef i64 @fu1_012() {
+; CHECK-LABEL: fu1_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test CC == 0 && CC == 1 && CC != 3.
+define noundef i64 @fu1_013() {
+; CHECK-LABEL: fu1_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test CC == 0 && CC == 2 && CC != 3.
+define noundef i64 @fu1_023() {
+; CHECK-LABEL: fu1_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test CC == 1 && CC == 2 && CC != 3.
+define noundef i64 @fu1_123() {
+; CHECK-LABEL: fu1_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+declare void @dummy() local_unnamed_addr #1
+
+; Test CC == 0 && CC != 2
+define void @bar_02() {
+; CHECK-LABEL: bar_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB29_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 && CC != 3.
+define void @bar_03() {
+; CHECK-LABEL: bar_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB30_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 1 && CC != 2
+define void @bar_12() {
+; CHECK-LABEL: bar_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgl dummy at PLT
+; CHECK-NEXT:  .LBB31_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 1 && CC != 3.
+define void @bar_13() {
+; CHECK-LABEL: bar_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgl dummy at PLT
+; CHECK-NEXT:  .LBB32_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 2 && CC != 3.
+define void @bar_23() {
+; CHECK-LABEL: bar_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgh dummy at PLT
+; CHECK-NEXT:  .LBB33_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 2
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 && CC != 1 && CC != 2.
+define void @bar_012() {
+; CHECK-LABEL: bar_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB34_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 && CC != 1 && CC != 3.
+define void @bar_013() {
+; CHECK-LABEL: bar_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB35_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 && CC != 2 && CC != 3.
+define void @bar_023() {
+; CHECK-LABEL: bar_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB36_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 1 && CC != 2 && CC != 3.
+define void @bar_123() {
+; CHECK-LABEL: bar_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgl dummy at PLT
+; CHECK-NEXT:  .LBB37_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 && CC == 1 && CC != 2.
+define void @bar1_012() {
+; CHECK-LABEL: bar1_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret void
+}
+
+; Test CC == 0 && CC == 1 && CC != 3.
+define void @bar1_013() {
+; CHECK-LABEL: bar1_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret void
+}
+
+; Test CC == 0 && CC == 2 && CC != 3.
+define void @bar1_023() {
+; CHECK-LABEL: bar1_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret void
+}
+
+; Test CC == 1 && CC == 2 && CC != 3.
+define void @bar1_123() {
+; CHECK-LABEL: bar1_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret void
+}
+
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccand_not.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccand_not.ll
new file mode 100644
index 0000000000000..766bb07eef209
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccand_not.ll
@@ -0,0 +1,779 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; Test Flag Output Operands with 14 combinations of CCMASK and optimizations.
+; This test checks the negated conditions of flag_output_operand_ccand, e.g.
+; CC != 0 && CC != 1 && CC != 2, for AND across three different functions,
+; including two test cases from Heiko.
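+;
+; As an illustrative sketch (not part of this patch), the negated conditions
+; correspond to C source along the lines of:
+;
+;   int cc;
+;   asm("ahi %0,42" : "=d"(x), "=@cc"(cc) : "0"(x));
+;   return (cc != 0 && cc != 1) ? 42 : 0;
+;
+; with the CC comparisons expected to fold into a single conditional branch
+; on the inline-asm condition code.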
+
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
+
+; Test CC != 0 && CC != 1.
+define signext range(i32 0, 43) i32 @foo_01(i32 noundef signext %x) {
+; CHECK-LABEL: foo_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB0_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp samesign ugt i32 %asmresult1, 1
+  %cond = select i1 %2, i32 42, i32 0
+  ret i32 %cond
+}
+
+declare void @llvm.assume(i1 noundef) #1
+
+; Test CC != 0 && CC != 2.
+define signext range(i32 0, 43) i32 @foo_02(i32 noundef signext %x) {
+; CHECK-LABEL: foo_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB1_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %asmresult1, 1
+  %.not = icmp eq i32 %2, 0
+  %cond = select i1 %.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC != 0 && CC != 3.
+define signext range(i32 0, 43) i32 @foo_03(i32 noundef signext %x) {
+; CHECK-LABEL: foo_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB2_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp ne i32 %asmresult1, 0
+  %cmp2 = icmp ne i32 %asmresult1, 3
+  %2 = and i1 %cmp, %cmp2
+  %cond = select i1 %2, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC != 1 && CC != 2.
+define signext range(i32 0, 43) i32 @foo_12(i32 noundef signext %x) {
+; CHECK-LABEL: foo_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB3_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -3
+  %3 = icmp ult i32 %2, -2
+  %cond = select i1 %3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC != 1 && CC != 3.
+define signext range(i32 0, 43) i32 @foo_13(i32 noundef signext %x) {
+; CHECK-LABEL: foo_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB4_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %asmresult1, 1
+  %.not.not = icmp eq i32 %2, 0
+  %cond = select i1 %.not.not, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC != 2 && CC != 3.
+define signext range(i32 0, 43) i32 @foo_23(i32 noundef signext %x) {
+; CHECK-LABEL: foo_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB5_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = or disjoint i32 %asmresult1, -4
+  %3 = icmp samesign ult i32 %2, -2
+  %cond = select i1 %3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC != 0 && CC != 1 && CC != 2
+define signext range(i32 0, 43) i32 @foo_012(i32 noundef signext %x) {
+; CHECK-LABEL: foo_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bnor %r14
+; CHECK-NEXT:  .LBB6_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %or.cond = icmp samesign ugt i32 %asmresult1, 1
+  %cmp3.not = icmp eq i32 %asmresult1, 2
+  %2 = select i1 %cmp3.not, i32 0, i32 42
+  %cond = select i1 %or.cond, i32 %2, i32 0
+  ret i32 %cond
+}
+
+; Test CC != 0 && CC != 1 && CC != 3.
+define signext range(i32 0, 43) i32 @foo_013(i32 noundef signext %x) {
+; CHECK-LABEL: foo_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bnhr %r14
+; CHECK-NEXT:  .LBB7_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %or.cond = icmp samesign ugt i32 %asmresult1, 1
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %2 = select i1 %cmp3.not, i32 0, i32 42
+  %cond = select i1 %or.cond, i32 %2, i32 0
+  ret i32 %cond
+}
+
+; Test CC != 0 && CC != 2 && CC != 3.
+define signext range(i32 0, 43) i32 @foo_023(i32 noundef signext %x) {
+; CHECK-LABEL: foo_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bnlr %r14
+; CHECK-NEXT:  .LBB8_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %asmresult1, 1
+  %or.cond.not = icmp eq i32 %2, 0
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %3 = or i1 %cmp3.not, %or.cond.not
+  %cond = select i1 %3, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC != 1 && CC != 2 && CC != 3.
+define signext range(i32 0, 43) i32 @foo_123(i32 noundef signext %x) {
+; CHECK-LABEL: foo_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bner %r14
+; CHECK-NEXT:  .LBB9_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -3
+  %or.cond = icmp ult i32 %2, -2
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %3 = select i1 %cmp3.not, i32 0, i32 42
+  %cond = select i1 %or.cond, i32 %3, i32 0
+  ret i32 %cond
+}
+
+ at a = dso_local global i32 0, align 4
+
+; Test CC != 0 && CC != 1.
+define dso_local range(i64 5, 9) i64 @fu_01() local_unnamed_addr #0 {
+; CHECK-LABEL: fu_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB10_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp samesign ult i32 %0, 2
+  %. = select i1 %2, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 0 && CC != 2.
+define dso_local range(i64 5, 9) i64 @fu_02() local_unnamed_addr #0 {
+; CHECK-LABEL: fu_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB11_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 0 && CC != 3.
+define dso_local range(i64 5, 9) i64 @fu_03() local_unnamed_addr #0 {
+; CHECK-LABEL: fu_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB12_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp eq i32 %0, 0
+  %cmp1.i = icmp eq i32 %0, 3
+  %.not = or i1 %cmp.i, %cmp1.i
+  %. = select i1 %.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 1 && CC != 2.
+define dso_local range(i64 5, 9) i64 @fu_12() local_unnamed_addr #0 {
+; CHECK-LABEL: fu_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB13_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -1
+  %3 = icmp ult i32 %2, 2
+  %. = select i1 %3, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 1 && CC != 3.
+define dso_local range(i64 5, 9) i64 @fu_13() local_unnamed_addr #0 {
+; CHECK-LABEL: fu_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB14_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC != 2 && CC != 3.
+define dso_local range(i64 5, 9) i64 @fu_23() local_unnamed_addr #0 {
+; CHECK-LABEL: fu_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB15_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = or disjoint i32 %0, -4
+  %3 = icmp samesign ugt i32 %2, -3
+  %. = select i1 %3, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 0 && CC != 1 && CC != 2.
+define dso_local range(i64 5, 9) i64 @fu_012() local_unnamed_addr #0 {
+; CHECK-LABEL: fu_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB16_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %narrow.not = icmp eq i32 %0, 3
+  %. = select i1 %narrow.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC != 0 && CC != 1 && CC != 3.
+define dso_local range(i64 5, 9) i64 @fu_013() local_unnamed_addr #0 {
+; CHECK-LABEL: fu_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnhr %r14
+; CHECK-NEXT:  .LBB17_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %or.cond.i = icmp samesign ult i32 %0, 2
+  %cmp2.i = icmp eq i32 %0, 3
+  %narrow.not = or i1 %or.cond.i, %cmp2.i
+  %. = select i1 %narrow.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 0 && CC != 2 && CC != 3.
+define dso_local range(i64 5, 9) i64 @fu_023() local_unnamed_addr #0 {
+; CHECK-LABEL: fu_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlr %r14
+; CHECK-NEXT:  .LBB18_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %or.cond.not.i = icmp eq i32 %2, 0
+  %cmp2.i = icmp eq i32 %0, 3
+  %narrow.not = or i1 %cmp2.i, %or.cond.not.i
+  %. = select i1 %narrow.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 1 && CC != 2 && CC != 3.
+define dso_local range(i64 5, 9) i64 @fu_123() local_unnamed_addr #0 {
+; CHECK-LABEL: fu_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB19_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %narrow.not = icmp eq i32 %0, 0
+  %. = select i1 %narrow.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC != 0 && CC != 1.
+define void @bar_01(){
+; CHECK-LABEL: bar_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnle dummy at PLT
+; CHECK-NEXT:  .LBB20_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp samesign ult i32 %0, 2
+  br i1 %2, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+declare void @dummy() local_unnamed_addr #1
+
+; Test CC != 0 && CC != 2.
+define void @bar_02(){
+; CHECK-LABEL: bar_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnhe dummy at PLT
+; CHECK-NEXT:  .LBB21_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not = icmp eq i32 %2, 0
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC != 0 && CC != 3.
+define void @bar_03(){
+; CHECK-LABEL: bar_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jglh dummy at PLT
+; CHECK-NEXT:  .LBB22_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  switch i32 %0, label %if.then [
+    i32 3, label %if.end
+    i32 0, label %if.end
+  ]
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %entry, %if.then
+  ret void
+}
+
+; Test CC != 1 && CC != 2.
+define void @bar_12(){
+; CHECK-LABEL: bar_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnlh dummy@PLT
+; CHECK-NEXT:  .LBB23_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -1
+  %3 = icmp ult i32 %2, 2
+  br i1 %3, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC != 1 && CC != 3.
+define void @bar_13(){
+; CHECK-LABEL: bar_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jghe dummy@PLT
+; CHECK-NEXT:  .LBB24_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  br i1 %tobool.not.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC != 2 && CC != 3.
+define void @bar_23(){
+; CHECK-LABEL: bar_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgle dummy@PLT
+; CHECK-NEXT:  .LBB25_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = or disjoint i32 %0, -4
+  %3 = icmp samesign ugt i32 %2, -3
+  br i1 %3, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC != 0 && CC != 1 && CC != 2.
+define void @bar_012(){
+; CHECK-LABEL: bar_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgo dummy@PLT
+; CHECK-NEXT:  .LBB26_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %narrow.not = icmp eq i32 %0, 3
+  br i1 %narrow.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC != 0 && CC != 1 && CC != 3.
+define void @bar_013(){
+; CHECK-LABEL: bar_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgh dummy@PLT
+; CHECK-NEXT:  .LBB27_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %switch = icmp eq i32 %0, 2
+  br i1 %switch, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %if.then
+  ret void
+}
+
+; Test CC != 0 && CC != 2 && CC != 3.
+define void @bar_023(){
+; CHECK-LABEL: bar_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgl dummy@PLT
+; CHECK-NEXT:  .LBB28_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %or.cond.not.i = icmp eq i32 %2, 0
+  %cmp2.i = icmp eq i32 %0, 3
+  %narrow.not = or i1 %cmp2.i, %or.cond.not.i
+  br i1 %narrow.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC != 1 && CC != 2 && CC != 3.
+define void @bar_123(){
+; CHECK-LABEL: bar_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy@PLT
+; CHECK-NEXT:  .LBB29_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %narrow.not = icmp eq i32 %0, 0
+  br i1 %narrow.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed.ll
new file mode 100644
index 0000000000000..46e162f697a73
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed.ll
@@ -0,0 +1,2427 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; Test Flag Output Operands with 14 combinations of CCMASK and optimizations.
+; This tests mixing XOR with OR, XOR with AND, and OR with AND, with
+; different ways of parenthesizing the == comparisons (see the sketch below).
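+;
+; For reference, a minimal C-level sketch of the kind of source these IR tests
+; model (not taken from this patch; x, cc and the particular condition are
+; illustrative assumptions), using the "=@cc" flag output constraint:
+;
+;   int cc;                                   /* condition code, 0..3 */
+;   asm ("ahi %0,42" : "+d"(x), "=@cc"(cc));
+;   return (((cc == 0) || (cc == 1)) ^ (cc == 2)) ? 42 : 0;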
+
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O3 | FileCheck %s
+
+declare void @llvm.assume(i1 noundef)
+
+@a = dso_local global i32 0, align 4
+
+; Test ((cc == 0) || (cc == 1)) ^ (cc == 2)
+define signext range(i32 0, 43) i32 @bar_012_OR_XOR(i32 noundef signext %x) {
+; CHECK-LABEL: bar_012_OR_XOR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB0_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor6.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %xor6.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+
+; Test ((cc == 0) || (cc == 1)) ^ (cc == 3)
+define signext range(i32 0, 43) i32 @bar_013_OR_XOR(i32 noundef signext %x) {
+; CHECK-LABEL: bar_013_OR_XOR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnhr %r14
+; CHECK-NEXT:  .LBB1_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp samesign ult i32 %asmresult1, 2
+  %cmp3 = icmp eq i32 %asmresult1, 3
+  %xor6 = xor i1 %2, %cmp3
+  %cond = select i1 %xor6, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test ((cc == 0) || (cc == 2)) ^ (cc == 3)
+define signext range(i32 0, 43) i32 @bar_023_OR_XOR(i32 noundef signext %x) {
+; CHECK-LABEL: bar_023_OR_XOR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnlr %r14
+; CHECK-NEXT:  .LBB2_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = trunc i32 %asmresult1 to i1
+  %3 = icmp ne i32 %asmresult1, 3
+  %tobool.not.not = xor i1 %3, %2
+  %cond = select i1 %tobool.not.not, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test ((cc == 1) || (cc == 2)) ^ (cc == 3)
+define signext range(i32 0, 43) i32 @bar_123_OR_XOR(i32 noundef signext %x) {
+; CHECK-LABEL: bar_123_OR_XOR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bner %r14
+; CHECK-NEXT:  .LBB3_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -1
+  %3 = icmp ult i32 %2, 2
+  %cmp3 = icmp eq i32 %asmresult1, 3
+  %xor6 = xor i1 %cmp3, %3
+  %cond = select i1 %xor6, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test ((cc == 0) ^ (cc == 1)) || (cc == 2)
+define signext range(i32 0, 43) i32 @foo_012_XOR_OR(i32 noundef signext %x) {
+; CHECK-LABEL: foo_012_XOR_OR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB4_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test ((cc == 0) ^ (cc == 1)) || (cc == 3)
+define signext range(i32 0, 43) i32 @foo_013_XOR_OR(i32 noundef signext %x) {
+; CHECK-LABEL: foo_013_XOR_OR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnhr %r14
+; CHECK-NEXT:  .LBB5_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor8 = icmp samesign ult i32 %asmresult1, 2
+  %cmp4 = icmp eq i32 %asmresult1, 3
+  %2 = or i1 %xor8, %cmp4
+  %cond = select i1 %2, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test ((cc == 0) ^ (cc == 2)) || (cc == 3)
+define signext range(i32 0, 43) i32 @foo_023_XOR_OR(i32 noundef signext %x) {
+; CHECK-LABEL: foo_023_XOR_OR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnlr %r14
+; CHECK-NEXT:  .LBB6_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %cmp2 = icmp eq i32 %asmresult1, 2
+  %xor8 = xor i1 %cmp, %cmp2
+  %cmp4 = icmp eq i32 %asmresult1, 3
+  %2 = or i1 %cmp4, %xor8
+  %cond = select i1 %2, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test ((cc == 1) ^ (cc == 2)) || (cc == 3)
+define signext range(i32 0, 43) i32 @foo_123_XOR_OR(i32 noundef signext %x) {
+; CHECK-LABEL: foo_123_XOR_OR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bner %r14
+; CHECK-NEXT:  .LBB7_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -1
+  %3 = icmp ult i32 %2, 3
+  %cond = select i1 %3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (cc == 0) || ((cc == 1) ^ (cc == 2))
+define range(i64 5, 9) i64 @fu_012_OR_XOR_a() {
+; CHECK-LABEL: fu_012_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB8_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor6.i.not = icmp eq i32 %0, 3
+  %. = select i1 %xor6.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) || (cc == 1)) ^ (cc == 2)
+define range(i64 5, 9) i64 @fu_012_OR_XOR_c() {
+; CHECK-LABEL: fu_012_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB9_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5.i.not = icmp eq i32 %0, 3
+  %. = select i1 %xor5.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) ^ ((cc == 1) || (cc == 2))
+define range(i64 5, 9) i64 @fu_012_XOR_OR_a() {
+; CHECK-LABEL: fu_012_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB10_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %tobool.not = icmp eq i32 %0, 3
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) ^ (cc == 1)) || (cc == 2)
+define range(i64 5, 9) i64 @fu_012_XOR_OR_c() {
+; CHECK-LABEL: fu_012_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB11_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %.not = icmp eq i32 %0, 3
+  %. = select i1 %.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) || ((cc == 1) ^ (cc == 3))
+define range(i64 5, 9) i64 @fu_013_OR_XOR_a() {
+; CHECK-LABEL: fu_013_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB12_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp ne i32 %0, 0
+  %2 = and i32 %0, 1
+  %xor6.i.not = icmp eq i32 %2, 0
+  %narrow.not = and i1 %cmp.i, %xor6.i.not
+  %. = select i1 %narrow.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) || (cc == 1)) ^ (cc == 3)
+define range(i64 5, 9) i64 @fu_013_OR_XOR_c() {
+; CHECK-LABEL: fu_013_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB13_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp samesign ugt i32 %0, 1
+  %3 = icmp ne i32 %0, 3
+  %tobool.not = and i1 %2, %3
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) ^ ((cc == 1) || (cc == 3))
+define range(i64 5, 9) i64 @fu_013_XOR_OR_a() {
+; CHECK-LABEL: fu_013_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB14_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = trunc i32 %0 to i1
+  %3 = icmp ne i32 %0, 0
+  %tobool.not = xor i1 %3, %2
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) ^ (cc == 1)) || (cc == 3)
+define range(i64 5, 9) i64 @fu_013_XOR_OR_c() {
+; CHECK-LABEL: fu_013_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB15_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor7.i = icmp samesign ugt i32 %0, 1
+  %cmp3.i = icmp ne i32 %0, 3
+  %.not = and i1 %xor7.i, %cmp3.i
+  %. = select i1 %.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) || ((cc == 2) ^ (cc == 3))
+define range(i64 5, 9) i64 @fu_023_OR_XOR_a() {
+; CHECK-LABEL: fu_023_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB16_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor6.i.not = icmp eq i32 %0, 1
+  %. = select i1 %xor6.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) || (cc == 2)) ^ (cc == 3)
+define range(i64 5, 9) i64 @fu_023_OR_XOR_c() {
+; CHECK-LABEL: fu_023_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bnlr %r14
+; CHECK-NEXT:  .LBB17_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = trunc i32 %0 to i1
+  %3 = icmp ne i32 %0, 3
+  %tobool.not.not = xor i1 %3, %2
+  %. = select i1 %tobool.not.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test (cc == 0) ^ ((cc == 2) || (cc == 3)).
+define range(i64 5, 9) i64 @fu_023_XOR_OR_a() {
+; CHECK-LABEL: fu_023_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB18_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor7.i.not = icmp eq i32 %0, 1
+  %. = select i1 %xor7.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) ^ (cc == 2)) || (cc == 3)
+define range(i64 5, 9) i64 @fu_023_XOR_OR_c() {
+; CHECK-LABEL: fu_023_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB19_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %xor7.i.not = icmp ne i32 %2, 0
+  %cmp3.i = icmp ne i32 %0, 3
+  %.not = and i1 %cmp3.i, %xor7.i.not
+  %. = select i1 %.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 1) || ((cc == 2) ^ (cc == 3))
+define range(i64 5, 9) i64 @fu_123_OR_XOR_a() {
+; CHECK-LABEL: fu_123_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB20_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor6.i.not = icmp eq i32 %0, 0
+  %. = select i1 %xor6.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 1) || (cc == 2)) ^ (cc == 3)
+define range(i64 5, 9) i64 @fu_123_OR_XOR_c() {
+; CHECK-LABEL: fu_123_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB21_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %tobool.not = icmp eq i32 %0, 0
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 1) ^ ((cc == 2) || (cc == 3)).
+define range(i64 5, 9) i64 @fu_123_XOR_OR_a() {
+; CHECK-LABEL: fu_123_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB22_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor7.i.not = icmp eq i32 %0, 0
+  %. = select i1 %xor7.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 1) ^ (cc == 2)) || (cc == 3)
+define range(i64 5, 9) i64 @fu_123_XOR_OR_c() {
+; CHECK-LABEL: fu_123_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB23_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp eq i32 %0, 0
+  %. = select i1 %2, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) || ((cc == 1) ^ (cc == 2))
+define i64 @bar_012_OR_XOR_a() {
+; CHECK-LABEL: bar_012_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy@PLT
+; CHECK-NEXT:  .LBB24_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor6.i.not = icmp eq i32 %0, 3
+  br i1 %xor6.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) || (cc == 1)) ^ (cc == 2)
+define i64 @bar_012_OR_XOR_c() {
+; CHECK-LABEL: bar_012_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy@PLT
+; CHECK-NEXT:  .LBB25_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5.i.not = icmp eq i32 %0, 3
+  br i1 %xor5.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) ^ ((cc == 1) || (cc == 2))
+define i64 @bar_012_XOR_OR_a() {
+; CHECK-LABEL: bar_012_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy@PLT
+; CHECK-NEXT:  .LBB26_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %tobool.not = icmp eq i32 %0, 3
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc == 1)) || (cc == 2)
+define i64 @bar_012_XOR_OR_c() {
+; CHECK-LABEL: bar_012_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy@PLT
+; CHECK-NEXT:  .LBB27_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %.not = icmp eq i32 %0, 3
+  br i1 %.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) || ((cc == 1) ^ (cc == 3))
+define i64 @bar_013_OR_XOR_a() {
+; CHECK-LABEL: bar_013_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy@PLT
+; CHECK-NEXT:  .LBB28_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp ne i32 %0, 0
+  %2 = and i32 %0, 1
+  %xor6.i.not = icmp eq i32 %2, 0
+  %narrow.not = and i1 %cmp.i, %xor6.i.not
+  br i1 %narrow.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) || (cc == 1)) ^ (cc == 3)
+define i64 @bar_013_OR_XOR_c() {
+; CHECK-LABEL: bar_013_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy@PLT
+; CHECK-NEXT:  .LBB29_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %switch = icmp eq i32 %0, 2
+  br i1 %switch, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %if.then
+  ret i64 undef
+}
+
+; Test (cc == 0) ^ ((cc == 1) || (cc == 3))
+define i64 @bar_013_XOR_OR_a() {
+; CHECK-LABEL: bar_013_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy@PLT
+; CHECK-NEXT:  .LBB30_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = trunc i32 %0 to i1
+  %3 = icmp ne i32 %0, 0
+  %tobool.not = xor i1 %3, %2
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc == 1)) || (cc == 3)
+define i64 @bar_013_XOR_OR_c() {
+; CHECK-LABEL: bar_013_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy@PLT
+; CHECK-NEXT:  .LBB31_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %switch = icmp eq i32 %0, 2
+  br i1 %switch, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %if.then
+  ret i64 undef
+}
+
+; Test (cc == 0) || ((cc == 2) ^ (cc == 3))
+define i64 @bar_023_OR_XOR_a() {
+; CHECK-LABEL: bar_023_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnl dummy@PLT
+; CHECK-NEXT:  .LBB32_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor6.i.not = icmp eq i32 %0, 1
+  br i1 %xor6.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) || (cc == 2)) ^ (cc == 3)
+define i64 @bar_023_OR_XOR_c() {
+; CHECK-LABEL: bar_023_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnl dummy@PLT
+; CHECK-NEXT:  .LBB33_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = trunc i32 %0 to i1
+  %3 = icmp ne i32 %0, 3
+  %tobool.not.not = xor i1 %3, %2
+  br i1 %tobool.not.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) ^ ((cc == 2) || (cc == 3))
+define i64 @bar_023_XOR_OR_a() {
+; CHECK-LABEL: bar_023_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnl dummy@PLT
+; CHECK-NEXT:  .LBB34_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor7.i.not = icmp eq i32 %0, 1
+  br i1 %xor7.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc == 2)) || (cc == 3)
+define i64 @bar_023_XOR_OR_c() {
+; CHECK-LABEL: bar_023_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnl dummy@PLT
+; CHECK-NEXT:  .LBB35_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %xor7.i.not = icmp ne i32 %2, 0
+  %cmp3.i = icmp ne i32 %0, 3
+  %.not = and i1 %cmp3.i, %xor7.i.not
+  br i1 %.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 1) || ((cc == 2) ^ (cc == 3))
+define i64 @bar_123_OR_XOR_a() {
+; CHECK-LABEL: bar_123_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy@PLT
+; CHECK-NEXT:  .LBB36_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor6.i.not = icmp eq i32 %0, 0
+  br i1 %xor6.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) || (cc == 2)) ^ (cc == 3)
+define i64 @bar_123_OR_XOR_c() {
+; CHECK-LABEL: bar_123_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy@PLT
+; CHECK-NEXT:  .LBB37_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %tobool.not = icmp eq i32 %0, 0
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 1) ^ ((cc == 2) || (cc == 3))
+define i64 @bar_123_XOR_OR_a() {
+; CHECK-LABEL: bar_123_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy@PLT
+; CHECK-NEXT:  .LBB38_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor7.i.not = icmp eq i32 %0, 0
+  br i1 %xor7.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) ^ (cc == 2)) || (cc == 3)
+define i64 @bar_123_XOR_OR_c() {
+; CHECK-LABEL: bar_123_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy@PLT
+; CHECK-NEXT:  .LBB39_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp eq i32 %0, 0
+  br i1 %2, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test OR_AND and AND_OR
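+; Because cc cannot take two different values at once, the OR_AND combinations
+; below are always false and fold to a constant return, so no CC-based branch
+; is emitted.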
+; Test (((cc == 0) || (cc == 1)) && (cc == 3))
+define  noundef signext range(i32 0, 43) i32 @bar_013_OR_AND(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_013_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test (((cc == 0) || (cc == 2)) && (cc == 3))
+define  noundef signext range(i32 0, 43) i32 @bar_023_OR_AND(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_023_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test (((cc == 1) || (cc == 2)) && (cc == 3))
+define  noundef signext range(i32 0, 43) i32 @bar_123_OR_AND(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_123_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test (((cc == 0) && (cc == 1)) || (cc == 2))
+define  signext range(i32 0, 43) i32 @bar_012_AND_OR_a(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_012_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB43_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3 = icmp eq i32 %asmresult1, 2
+  %cond = select i1 %cmp3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc == 1)) || (cc == 3))
+define  signext range(i32 0, 43) i32 @bar_013_AND_OR_a(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_013_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB44_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3 = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc == 2)) || (cc == 3))
+define  signext range(i32 0, 43) i32 @bar_023_AND_OR_a(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_023_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB45_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3 = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 1) && (cc == 2)) || (cc == 3))
+define  signext range(i32 0, 43) i32 @bar_123_AND_OR_a(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_123_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB46_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3 = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test ((cc == 0) && ((cc == 1)) || (cc == 2))
+define  signext range(i32 0, 43) i32 @bar_012_AND_OR_b(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_012_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB47_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3 = icmp eq i32 %asmresult1, 2
+  %cond = select i1 %cmp3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test ((cc == 0) && ((cc == 1)) || (cc == 3))
+define  signext range(i32 0, 43) i32 @bar_013_AND_OR_b(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_013_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB48_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3 = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test ((cc == 0) && ((cc == 2)) || (cc == 3))
+define  signext range(i32 0, 43) i32 @bar_023_AND_OR_b(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_023_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB49_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3 = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test ((cc == 1) && ((cc == 2)) || (cc == 3))
+define  signext range(i32 0, 43) i32 @bar_123_AND_OR_b(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_123_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB50_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3 = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3, i32 42, i32 0
+  ret i32 %cond
+}
+
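+; Likewise, the XOR_AND combinations below are always false: whenever the
+; final (cc == Z) comparison holds, both operands of the XOR are false, so
+; the whole expression folds to a constant 0.
+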
+; Test (((cc == 0) ^ (cc == 1)) && (cc == 2))
+define  noundef signext range(i32 0, 43) i32 @bar_012_XOR_AND(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_012_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test (((cc == 0) ^ (cc == 1)) && (cc == 3))
+define  noundef signext range(i32 0, 43) i32 @bar_013_XOR_AND(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_013_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test (((cc == 0) ^ (cc == 2)) && (cc == 3))
+define  noundef signext range(i32 0, 43) i32 @bar_023_XOR_AND(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_023_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test (((cc == 1) ^ (cc == 2)) && (cc == 3))
+define  noundef signext range(i32 0, 43) i32 @bar_123_XOR_AND(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_123_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test (((cc == 0) && (cc == 1)) ^ (cc == 2))
+define  signext range(i32 0, 43) i32 @bar_012_AND_XOR_a(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_012_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB55_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3 = icmp eq i32 %asmresult1, 2
+  %cond = select i1 %cmp3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc == 1)) ^ (cc == 3))
+define  signext range(i32 0, 43) i32 @bar_013_AND_XOR_a(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_013_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB56_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3 = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc == 2)) ^ (cc == 3))
+define  signext range(i32 0, 43) i32 @bar_023_AND_XOR_a(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_023_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB57_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3 = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 1) && (cc == 2)) ^ (cc == 3))
+define  signext range(i32 0, 43) i32 @bar_123_AND_XOR_a(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_123_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB58_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3 = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test ((cc == 0) && ((cc == 1)) ^ (cc == 2))
+define  noundef signext i32 @bar_012_AND_XOR_b(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_012_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test ((cc == 0) && ((cc == 1)) ^ (cc == 3))
+define  noundef signext i32 @bar_013_AND_XOR_b(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_013_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test ((cc == 0) && ((cc == 2)) ^ (cc == 3))
+define  noundef signext i32 @bar_023_AND_XOR_b(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_023_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test ((cc == 1) && ((cc == 2)) ^ (cc == 3))
+define  noundef signext i32 @bar_123_AND_XOR_b(i32 noundef signext %x)  {
+; CHECK-LABEL: bar_123_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test ((cc == 0) || (cc == 1)) && (cc == 2)
+define noundef range(i64 5, 9) i64 @fu_012_OR_AND() {
+; CHECK-LABEL: fu_012_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 0) && (cc == 1)) || (cc == 2)
+define range(i64 5, 9) i64 @fu_012_AND_OR_a() {
+; CHECK-LABEL: fu_012_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB64_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 2
+  %. = select i1 %cmp2.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test (cc == 0) && ((cc == 1) || (cc == 2))
+define noundef i64 @fu_012_AND_OR_b() {
+; CHECK-LABEL: fu_012_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 0) || (cc == 1)) && (cc == 3)
+define noundef range(i64 5, 9) i64 @fu_013_OR_AND() {
+; CHECK-LABEL: fu_013_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 0) && (cc == 1)) || (cc == 3)
+define range(i64 5, 9) i64 @fu_013_XOR_AND_OR_a() {
+; CHECK-LABEL: fu_013_XOR_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB67_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test (cc == 0) && ((cc == 1) || (cc == 3))
+define noundef i64 @fu_013_AND_OR_b() {
+; CHECK-LABEL: fu_013_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 0) || (cc == 2)) && (cc == 3)
+define noundef range(i64 5, 9) i64 @fu_023_OR_AND() {
+; CHECK-LABEL: fu_023_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 0) && (cc == 2)) || (cc == 3)
+define range(i64 5, 9) i64 @fu_023_AND_OR_a() {
+; CHECK-LABEL: fu_023_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB70_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test (cc == 0) && ((cc == 2) || (cc == 3))
+define noundef i64 @fu_023_AND_OR_b() {
+; CHECK-LABEL: fu_023_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 1) || (cc == 2)) && (cc == 3)
+define noundef range(i64 5, 9) i64 @fu_123_OR_AND() {
+; CHECK-LABEL: fu_123_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 1) && (cc == 2)) || (cc == 3)
+define range(i64 5, 9) i64 @fu_123_AND_OR_a() {
+; CHECK-LABEL: fu_123_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB73_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test (cc == 1) && ((cc == 2) || (cc == 3))
+define noundef i64 @fu_123_AND_OR_b() {
+; CHECK-LABEL: fu_123_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 0) ^ (cc == 1)) && (cc == 2)
+define noundef range(i64 5, 9) i64 @fu_012_XOR_AND() {
+; CHECK-LABEL: fu_012_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 0) && (cc == 1)) ^ (cc == 2)
+define range(i64 5, 9) i64 @fu_012_AND_XOR_a() {
+; CHECK-LABEL: fu_012_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB76_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 2
+  %. = select i1 %cmp2.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test (cc == 0) && ((cc == 1) ^ (cc == 2))
+define noundef i64 @fu_012_AND_XOR_b() {
+; CHECK-LABEL: fu_012_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 0) ^ (cc == 1)) && (cc == 3)
+define noundef range(i64 5, 9) i64 @fu_013_XOR_AND() {
+; CHECK-LABEL: fu_013_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 0) && (cc == 1)) ^ (cc == 3)
+define range(i64 5, 9) i64 @fu_013_XOR_AND_XOR_a() {
+; CHECK-LABEL: fu_013_XOR_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB79_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test (cc == 0) && ((cc == 1) ^ (cc == 3))
+define noundef i64 @fu_013_AND_XOR_b() {
+; CHECK-LABEL: fu_013_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 0) ^ (cc == 2)) && (cc == 3)
+define range(i64 5, 9) i64 @fu_023_XOR_AND() {
+; CHECK-LABEL: fu_023_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 0) && (cc == 2)) ^ (cc == 3)
+define range(i64 5, 9) i64 @fu_023_AND_XOR_a() {
+; CHECK-LABEL: fu_023_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB82_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test (cc == 0) && ((cc == 2) ^ (cc == 3))
+define noundef i64 @fu_023_AND_XOR_b() {
+; CHECK-LABEL: fu_023_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 1) ^ (cc == 2)) && (cc == 3)
+define noundef range(i64 5, 9) i64 @fu_123_XOR_AND() {
+; CHECK-LABEL: fu_123_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 1) && (cc == 2)) ^ (cc == 3)
+define range(i64 5, 9) i64 @fu_123_AND_XOR_a() {
+; CHECK-LABEL: fu_123_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB85_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test  (cc == 1) && ((cc == 2) ^ (cc == 3))
+define noundef i64 @fu_123_AND_XOR_b() {
+; CHECK-LABEL: fu_123_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 0) || (cc == 1)) && (cc == 2)
+define i64 @bar1_012_OR_AND() {
+; CHECK-LABEL: bar1_012_OR_AND:
+; CHECK:       # %bb.0: # %if.end
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+if.end:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+declare void @dummy() local_unnamed_addr #1
+
+; Test ((cc == 0) && (cc == 1)) || (cc == 2)
+define i64 @bar1_012_AND_OR_a() {
+; CHECK-LABEL: bar1_012_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgh dummy@PLT
+; CHECK-NEXT:  .LBB88_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 2
+  br i1 %cmp2.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc == 1) || (cc == 2))
+define i64 @bar1_012_AND_OR_b() {
+; CHECK-LABEL: bar1_012_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+; Test ((cc == 0) || (cc == 1)) && (cc == 3)
+define i64 @bar1_013_OR_AND() {
+; CHECK-LABEL: bar1_013_OR_AND:
+; CHECK:       # %bb.0: # %if.end
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+if.end:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc == 1)) || (cc == 3)
+define i64 @bar1_013_XOR_AND_OR_a() {
+; CHECK-LABEL: bar1_013_XOR_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgo dummy@PLT
+; CHECK-NEXT:  .LBB91_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc == 1) || (cc == 3))
+define i64 @bar1_013_AND_OR_b() {
+; CHECK-LABEL: bar1_013_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+; Test ((cc == 0) || (cc == 2)) && (cc == 3)
+define i64 @bar1_023_OR_AND() {
+; CHECK-LABEL: bar1_023_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc == 2)) || (cc == 3)
+define i64 @bar1_023_AND_OR_a() {
+; CHECK-LABEL: bar1_023_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgo dummy@PLT
+; CHECK-NEXT:  .LBB94_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc == 2) || (cc == 3))
+define i64 @bar1_023_AND_OR_b() {
+; CHECK-LABEL: bar1_023_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+; Test ((cc == 1) || (cc == 2)) && (cc == 3)
+define i64 @bar1_123_OR_AND() {
+; CHECK-LABEL: bar1_123_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+; Test ((cc == 1) && (cc == 2)) || (cc == 3)
+define i64 @bar1_123_AND_OR_a() {
+; CHECK-LABEL: bar1_123_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgo dummy@PLT
+; CHECK-NEXT:  .LBB97_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 1) && ((cc == 2) || (cc == 3))
+define i64 @bar1_123_AND_OR_b() {
+; CHECK-LABEL: bar1_123_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc == 1)) && (cc == 2)
+define i64 @bar1_012_XOR_AND() {
+; CHECK-LABEL: bar1_012_XOR_AND:
+; CHECK:       # %bb.0: # %if.end
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+if.end:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc == 1)) ^ (cc == 2)
+define i64 @bar1_012_AND_XOR_a() {
+; CHECK-LABEL: bar1_012_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgh dummy@PLT
+; CHECK-NEXT:  .LBB100_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 2
+  br i1 %cmp2.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc == 1) ^ (cc == 2))
+define i64 @bar1_012_AND_XOR_b() {
+; CHECK-LABEL: bar1_012_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc == 1)) && (cc == 3)
+define i64 @bar1_013_XOR_AND() {
+; CHECK-LABEL: bar1_013_XOR_AND:
+; CHECK:       # %bb.0: # %if.end
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+if.end:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc == 1)) ^ (cc == 3)
+define i64 @bar1_013_XOR_AND_XOR_a() {
+; CHECK-LABEL: bar1_013_XOR_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgo dummy@PLT
+; CHECK-NEXT:  .LBB103_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc == 1) ^ (cc == 3))
+define i64 @bar1_013_AND_XOR_b() {
+; CHECK-LABEL: bar1_013_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc == 2)) && (cc == 3)
+define i64 @bar1_023_XOR_AND() {
+; CHECK-LABEL: bar1_023_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc == 2)) ^ (cc == 3)
+define i64 @bar1_023_AND_XOR_a() {
+; CHECK-LABEL: bar1_023_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgo dummy@PLT
+; CHECK-NEXT:  .LBB106_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc == 2) ^ (cc == 3))
+define i64 @bar1_023_AND_XOR_b() {
+; CHECK-LABEL: bar1_023_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+; Test ((cc == 1) ^ (cc == 2)) && (cc == 3)
+define i64 @bar1_123_XOR_AND() {
+; CHECK-LABEL: bar1_123_XOR_AND:
+; CHECK:       # %bb.0: # %if.end
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+if.end:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+; Test ((cc == 1) && (cc == 2)) ^ (cc == 3)
+define i64 @bar1_123_AND_XOR_a() {
+; CHECK-LABEL: bar1_123_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgo dummy@PLT
+; CHECK-NEXT:  .LBB109_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test  (cc == 1) && ((cc == 2) ^ (cc == 3))
+define i64 @bar1_123_AND_XOR_b() {
+; CHECK-LABEL: bar1_123_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_eq_noteq.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_eq_noteq.ll
new file mode 100644
index 0000000000000..3c071709b2e2a
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_eq_noteq.ll
@@ -0,0 +1,5248 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; Test Flag Output Operands with 14 combinations of CCMASK and optimizations.
+; Different ways of parenthesizing with a mix of == and != operators for
+; AND/OR/XOR combinations.
+
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O3 | FileCheck %s
+
+; Test ((cc == 0) || (cc != 1)) ^ (cc != 2)
+define signext range(i32 0, 43) i32 @bar_012_OR_XOR(i32 noundef signext %x){
+; CHECK-LABEL: bar_012_OR_XOR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB0_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -1
+  %xor6 = icmp ult i32 %2, 2
+  %cond = select i1 %xor6, i32 42, i32 0
+  ret i32 %cond
+}
+
+declare void @llvm.assume(i1 noundef) #1
+
+; Test ((cc == 0) || (cc != 1)) ^ (cc != 3)
+define signext range(i32 0, 43) i32 @bar_013_OR_XOR(i32 noundef signext %x){
+; CHECK-LABEL: bar_013_OR_XOR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnher %r14
+; CHECK-NEXT:  .LBB1_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2 = icmp ne i32 %asmresult1, 1
+  %cmp3 = icmp ne i32 %asmresult1, 3
+  %xor6 = xor i1 %cmp2, %cmp3
+  %cond = select i1 %xor6, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test ((cc == 0) || (cc != 2)) ^ (cc != 3)
+define signext range(i32 0, 43) i32 @bar_023_OR_XOR(i32 noundef signext %x){
+; CHECK-LABEL: bar_023_OR_XOR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB2_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor6 = icmp samesign ugt i32 %asmresult1, 1
+  %cond = select i1 %xor6, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test ((cc == 1) || (cc != 2)) ^ (cc != 3)
+define signext range(i32 0, 43) i32 @bar_123_OR_XOR(i32 noundef signext %x){
+; CHECK-LABEL: bar_123_OR_XOR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB3_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor6 = icmp samesign ugt i32 %asmresult1, 1
+  %cond = select i1 %xor6, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test ((cc == 0) ^ (cc == 1)) || (cc != 2)
+define signext range(i32 0, 43) i32 @foo_012_XOR_OR(i32 noundef signext %x){
+; CHECK-LABEL: foo_012_XOR_OR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB4_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp4.not = icmp eq i32 %asmresult1, 2
+  %cond = select i1 %cmp4.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test ((cc == 0) ^ (cc == 1)) || (cc != 3)
+define signext range(i32 0, 43) i32 @foo_013_XOR_OR(i32 noundef signext %x){
+; CHECK-LABEL: foo_013_XOR_OR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB5_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp4.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp4.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test ((cc == 0) ^ (cc == 2)) || (cc != 3)
+define signext range(i32 0, 43) i32 @foo_023_XOR_OR(i32 noundef signext %x){
+; CHECK-LABEL: foo_023_XOR_OR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB6_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp4.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp4.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test ((cc == 1) ^ (cc == 2)) || (cc != 3)
+define signext range(i32 0, 43) i32 @foo_123_XOR_OR(i32 noundef signext %x){
+; CHECK-LABEL: foo_123_XOR_OR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB7_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp4.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp4.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc == 0) || (cc != 1)) && (cc != 2))
+define signext range(i32 0, 43) i32 @bar_012_OR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar_012_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB8_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -1
+  %3 = icmp ult i32 %2, 2
+  %cond = select i1 %3, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc == 0) || (cc != 1)) && (cc != 3))
+define signext range(i32 0, 43) i32 @bar_013_OR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar_013_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB9_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %asmresult1, 1
+  %.not = icmp eq i32 %2, 0
+  %cond = select i1 %.not, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 0) || (cc != 2)) && (cc != 3))
+define signext range(i32 0, 43) i32 @bar_023_OR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar_023_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB10_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %.not = icmp samesign ult i32 %asmresult1, 2
+  %cond = select i1 %.not, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 1) || (cc != 2)) && (cc != 3))
+define signext range(i32 0, 43) i32 @bar_123_OR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar_123_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB11_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %.not = icmp samesign ult i32 %asmresult1, 2
+  %cond = select i1 %.not, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc != 1)) || (cc != 2))
+define signext range(i32 0, 43) i32 @bar_012_AND_OR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar_012_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB12_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 2
+  %2 = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %2
+}
+
+; Test (((cc == 0) && (cc != 1)) || (cc != 3))
+define signext range(i32 0, 43) i32 @bar_013_AND_OR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar_013_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB13_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %2 = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %2
+}
+
+; Test (((cc == 0) && (cc != 2)) || (cc != 3))
+define signext range(i32 0, 43) i32 @bar_023_AND_OR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar_023_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB14_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %2 = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %2
+}
+
+; Test (((cc == 1) && (cc != 2)) || (cc != 3))
+define signext range(i32 0, 43) i32 @bar_123_AND_OR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar_123_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB15_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %2 = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %2
+}
+
+; Test (((cc == 0) && (cc != 1)) || (cc != 2))
+define signext range(i32 0, 43) i32 @bar_012_AND_OR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar_012_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB16_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 2
+  %2 = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %2
+}
+
+; Test (((cc == 0) && (cc != 1)) || (cc != 3))
+define signext range(i32 0, 43) i32 @bar_013_AND_OR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar_013_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB17_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %2 = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %2
+}
+
+; Test (((cc == 0) && (cc != 2)) || (cc != 3))
+define signext range(i32 0, 43) i32 @bar_023_AND_OR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar_023_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB18_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %2 = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %2
+}
+
+; Test (((cc == 1) && (cc != 2)) || (cc != 3))
+define signext range(i32 0, 43) i32 @bar_123_AND_OR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar_123_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB19_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %2 = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %2
+}
+
+; Test (((cc == 0) ^ (cc != 1)) && (cc != 2))
+define signext range(i32 0, 43) i32 @bar_012_XOR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar_012_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB20_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %2, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 0) ^ (cc != 1)) && (cc != 3))
+define signext range(i32 0, 43) i32 @bar_013_XOR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar_013_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB21_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor8 = icmp samesign ugt i32 %asmresult1, 1
+  %cmp4 = icmp ne i32 %asmresult1, 3
+  %2 = and i1 %xor8, %cmp4
+  %cond = select i1 %2, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 0) ^ (cc != 2)) && (cc != 3))
+define signext range(i32 0, 43) i32 @bar_023_XOR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar_023_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB22_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %asmresult1, 1
+  %tobool = icmp ne i32 %2, 0
+  %cmp4 = icmp ne i32 %asmresult1, 3
+  %3 = and i1 %cmp4, %tobool
+  %cond = select i1 %3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 1) ^ (cc != 2)) && (cc != 3))
+define signext range(i32 0, 43) i32 @bar_123_XOR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar_123_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB23_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp eq i32 %asmresult1, 0
+  %cond = select i1 %2, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc != 1)) ^ (cc != 2))
+define signext range(i32 0, 43) i32 @bar_012_AND_XOR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar_012_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB24_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %asmresult1, 1
+  %tobool.not = icmp eq i32 %2, 0
+  %cond = select i1 %tobool.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc != 1)) ^ (cc != 3))
+define signext range(i32 0, 43) i32 @bar_013_AND_XOR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar_013_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB25_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp ne i32 %asmresult1, 0
+  %cmp3 = icmp ne i32 %asmresult1, 3
+  %xor6 = and i1 %cmp, %cmp3
+  %cond = select i1 %xor6, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc != 2)) ^ (cc != 3))
+define signext range(i32 0, 43) i32 @bar_023_AND_XOR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar_023_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB26_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp ne i32 %asmresult1, 0
+  %cmp3 = icmp ne i32 %asmresult1, 3
+  %xor6 = and i1 %cmp, %cmp3
+  %cond = select i1 %xor6, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 1) && (cc != 2)) ^ (cc != 3))
+define signext range(i32 0, 43) i32 @bar_123_AND_XOR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar_123_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB27_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %asmresult1, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  %cond = select i1 %tobool.not.not, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (cc == 0) && ((cc != 1) ^ (cc != 2))
+define noundef signext i32 @bar_012_AND_XOR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar_012_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test (cc == 0) && ((cc != 1) ^ (cc != 3))
+define noundef signext i32 @bar_013_AND_XOR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar_013_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test (cc == 0) && ((cc != 2) ^ (cc != 3))
+define noundef signext i32 @bar_023_AND_XOR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar_023_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test (cc == 1) && ((cc != 2) ^ (cc != 3))
+define noundef signext i32 @bar_123_AND_XOR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar_123_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 0
+}
+
+; Test (((cc == 0) || (cc == 1)) && (cc != 2))
+define signext range(i32 0, 43) i32 @bar1_012_OR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_012_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB32_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %or.cond = icmp samesign ult i32 %asmresult1, 2
+  %spec.select = select i1 %or.cond, i32 42, i32 0
+  ret i32 %spec.select
+}
+
+; Test (((cc == 0) || (cc == 1)) && (cc != 3))
+define signext range(i32 0, 43) i32 @bar1_013_OR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_013_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB33_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %or.cond = icmp samesign ult i32 %asmresult1, 2
+  %spec.select = select i1 %or.cond, i32 42, i32 0
+  ret i32 %spec.select
+}
+
+; Test (((cc == 0) || (cc == 2)) && (cc != 3))
+define signext range(i32 0, 43) i32 @bar1_023_OR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_023_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lhi %r0, 0
+; CHECK-NEXT:    jno .LBB34_3
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    jnhe .LBB34_4
+; CHECK-NEXT:  .LBB34_2: # %entry
+; CHECK-NEXT:    llgfr %r2, %r0
+; CHECK-NEXT:    br %r14
+; CHECK-NEXT:  .LBB34_3: # %entry
+; CHECK-NEXT:    lhi %r0, 42
+; CHECK-NEXT:    jhe .LBB34_2
+; CHECK-NEXT:  .LBB34_4: # %entry
+; CHECK-NEXT:    lhi %r0, 0
+; CHECK-NEXT:    llgfr %r2, %r0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %asmresult1, 1
+  %or.cond = icmp eq i32 %2, 0
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %3 = select i1 %cmp3.not, i32 0, i32 42
+  %cond = select i1 %or.cond, i32 %3, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 1) || (cc == 2)) && (cc != 3))
+define signext range(i32 0, 43) i32 @bar1_123_OR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_123_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB35_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -1
+  %or.cond = icmp ult i32 %2, 2
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %3 = select i1 %cmp3.not, i32 0, i32 42
+  %cond = select i1 %or.cond, i32 %3, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc == 1)) || (cc != 2))
+define signext range(i32 0, 43) i32 @bar1_012_AND_OR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_012_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB36_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 2
+  %cond = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc == 1)) || (cc != 3))
+define signext range(i32 0, 43) i32 @bar1_013_AND_OR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_013_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB37_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc == 2)) || (cc != 3))
+define signext range(i32 0, 43) i32 @bar1_023_AND_OR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_023_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB38_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc == 1) && (cc == 2)) || (cc != 3))
+define signext range(i32 0, 43) i32 @bar1_123_AND_OR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_123_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB39_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test ((cc == 0) && ((cc == 1) || (cc != 2)))
+define signext range(i32 0, 43) i32 @bar1_012_AND_OR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_012_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB40_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 2
+  %cond = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test ((cc == 0) && ((cc == 1) || (cc != 3)))
+define signext range(i32 0, 43) i32 @bar1_013_AND_OR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_013_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB41_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test ((cc == 0) && ((cc == 2) || (cc != 3)))
+define signext range(i32 0, 43) i32 @bar1_023_AND_OR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_023_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB42_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test ((cc == 1) && ((cc == 2) || (cc != 3)))
+define signext range(i32 0, 43) i32 @bar1_123_AND_OR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_123_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB43_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc == 1)) ^ (cc != 2))
+define signext range(i32 0, 43) i32 @bar1_012_XOR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_012_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB44_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor8 = icmp samesign ult i32 %asmresult1, 2
+  %cond = select i1 %xor8, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc == 1)) ^ (cc != 3))
+define signext range(i32 0, 43) i32 @bar1_013_XOR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_013_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB45_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor8 = icmp samesign ult i32 %asmresult1, 2
+  %cond = select i1 %xor8, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc == 2)) ^ (cc != 3))
+define signext range(i32 0, 43) i32 @bar1_023_XOR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_023_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB46_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %cmp2 = icmp eq i32 %asmresult1, 2
+  %xor8 = xor i1 %cmp, %cmp2
+  %cond = select i1 %xor8, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 1) && (cc == 2)) ^ (cc != 3))
+define signext range(i32 0, 43) i32 @bar1_123_XOR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_123_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB47_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -1
+  %xor8 = icmp ult i32 %2, 2
+  %cond = select i1 %xor8, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc == 1)) ^ (cc != 2))
+define signext range(i32 0, 43) i32 @bar1_012_AND_XOR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_012_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB48_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 2
+  %cond = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc == 1)) ^ (cc != 3))
+define signext range(i32 0, 43) i32 @bar1_013_AND_XOR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_013_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB49_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc == 0) && (cc == 2)) ^ (cc != 3))
+define signext range(i32 0, 43) i32 @bar1_023_AND_XOR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_023_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB50_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc == 1) && (cc == 2)) ^ (cc != 3))
+define signext range(i32 0, 43) i32 @bar1_123_AND_XOR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_123_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB51_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test ((cc == 0) && ((cc == 1) ^ (cc != 2)))
+define signext range(i32 0, 43) i32 @bar1_012_AND_XOR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_012_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB52_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %spec.select = select i1 %cmp, i32 42, i32 0
+  ret i32 %spec.select
+}
+
+; Test ((cc == 0) && ((cc == 1) ^ (cc != 3)))
+define signext range(i32 0, 43) i32 @bar1_013_AND_XOR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_013_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB53_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %spec.select = select i1 %cmp, i32 42, i32 0
+  ret i32 %spec.select
+}
+
+; Test ((cc == 0) && ((cc == 2) ^ (cc != 3)))
+define signext range(i32 0, 43) i32 @bar1_023_AND_XOR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_023_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB54_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %spec.select = select i1 %cmp, i32 42, i32 0
+  ret i32 %spec.select
+}
+
+; Test ((cc == 1) && ((cc == 2) ^ (cc != 3)))
+define signext range(i32 0, 43) i32 @bar1_123_AND_XOR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar1_123_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB55_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 1
+  %spec.select = select i1 %cmp, i32 42, i32 0
+  ret i32 %spec.select
+}
+
+ at a = global i32 0, align 4
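+; The fua_* tests below use a memory-updating inline asm ("alsi" on the
+; global a) whose only register result is the "={@cc}" flag output. The
+; returned CC is bounded to [0, 3] via llvm.assume and then combined with
+; AND/OR/XOR of individual CC comparisons before feeding a select.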
+; Test ((cc == 0) || (cc != 1)) && (cc != 2)
+define range(i64 5, 9) i64 @fua_012_OR_AND() {
+; CHECK-LABEL: fua_012_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB56_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -1
+  %narrow = icmp ult i32 %2, 2
+  %. = select i1 %narrow, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) && (cc != 1)) || (cc != 2)
+define range(i64 5, 9) i64 @fua_012_AND_OR_a() {
+; CHECK-LABEL: fua_012_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB57_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 2
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) && ((cc != 1) || (cc != 2))
+define range(i64 5, 9) i64 @fua_012_AND_OR_b() {
+; CHECK-LABEL: fua_012_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB58_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 0) || (cc != 1)) && (cc != 3)
+define range(i64 5, 9) i64 @fua_013_OR_AND() {
+; CHECK-LABEL: fua_013_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB59_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 0) && (cc != 1)) || (cc != 3).
+define range(i64 5, 9) i64 @fua_013_AND_OR_a() {
+; CHECK-LABEL: fua_013_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB60_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) && ((cc != 1) || (cc != 3))
+define range(i64 5, 9) i64 @fua_013_AND_OR_b() {
+; CHECK-LABEL: fua_013_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB61_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 0) || (cc != 2)) && (cc != 3)
+define range(i64 5, 9) i64 @fua_023_OR_AND() {
+; CHECK-LABEL: fua_023_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB62_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = or disjoint i32 %0, -4
+  %narrow = icmp samesign ugt i32 %2, -3
+  %. = select i1 %narrow, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) && (cc != 2)) || (cc != 3)
+define range(i64 5, 9) i64 @fua_023_AND_OR_a() {
+; CHECK-LABEL: fua_023_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB63_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) && ((cc != 2) || (cc != 3)).
+define range(i64 5, 9) i64 @fua_023_AND_OR_b() {
+; CHECK-LABEL: fua_023_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB64_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 1) || (cc != 2)) && (cc != 3)
+define range(i64 5, 9) i64 @fua_123_OR_AND() {
+; CHECK-LABEL: fua_123_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB65_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = or disjoint i32 %0, -4
+  %narrow = icmp samesign ugt i32 %2, -3
+  %. = select i1 %narrow, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 1) && (cc != 2)) || (cc != 3).
+define range(i64 5, 9) i64 @fua_123_AND_OR_a() {
+; CHECK-LABEL: fua_123_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB66_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 1) && ((cc != 2) || (cc != 3)).
+define range(i64 5, 9) i64 @fua_123_AND_OR_b() {
+; CHECK-LABEL: fua_123_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB67_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 0) ^ (cc != 1)) && (cc != 2)
+define range(i64 5, 9) i64 @fua_012_XOR_AND() {
+; CHECK-LABEL: fua_012_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB68_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %.not = icmp eq i32 %0, 3
+  %. = select i1 %.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 0) && (cc != 1)) ^ (cc != 2)
+define range(i64 5, 9) i64 @fua_012_AND_XOR_a() {
+; CHECK-LABEL: fua_012_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB69_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) ^ (cc != 1)) && (cc != 3).
+define noundef i64 @fua_012_AND_XOR_b() {
+; CHECK-LABEL: fua_012_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 0) && (cc != 1)) ^ (cc != 3).
+define range(i64 5, 9) i64 @fua_013_XOR_AND() {
+; CHECK-LABEL: fua_013_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnhr %r14
+; CHECK-NEXT:  .LBB71_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor7.i = icmp samesign ult i32 %0, 2
+  %cmp3.i = icmp eq i32 %0, 3
+  %.not = or i1 %xor7.i, %cmp3.i
+  %. = select i1 %.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) && ((cc != 1) ^ (cc != 3))
+define range(i64 5, 9) i64 @fua_013_XOR_AND_XOR_a() {
+; CHECK-LABEL: fua_013_XOR_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB72_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp eq i32 %0, 0
+  %cmp2.i = icmp eq i32 %0, 3
+  %xor5.i.not = or i1 %cmp.i, %cmp2.i
+  %. = select i1 %xor5.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test  (cc == 0) && ((cc != 1) ^ (cc != 3)).
+define noundef i64 @fua_013_AND_XOR_b() {
+; CHECK-LABEL: fua_013_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 0) && (cc != 2)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fua_023_XOR_AND() {
+; CHECK-LABEL: fua_023_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB74_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp eq i32 %0, 0
+  %cmp2.i = icmp eq i32 %0, 3
+  %xor5.i.not = or i1 %cmp.i, %cmp2.i
+  %. = select i1 %xor5.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) && (cc != 2)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fua_023_AND_XOR_a() {
+; CHECK-LABEL: fua_023_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB75_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp eq i32 %0, 0
+  %cmp2.i = icmp eq i32 %0, 3
+  %xor5.i.not = or i1 %cmp.i, %cmp2.i
+  %. = select i1 %xor5.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test  (cc == 0) && ((cc != 2) ^ (cc != 3)).
+define noundef i64 @fua_023_AND_XOR_b() {
+; CHECK-LABEL: fua_023_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
+; Test ((cc == 1) ^ (cc != 2)) && (cc != 3).
+define range(i64 5, 9) i64 @fua_123_XOR_AND() {
+; CHECK-LABEL: fua_123_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB77_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %.not = icmp eq i32 %0, 0
+  %. = select i1 %.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 1) && (cc != 2)) ^ (cc != 3).
+define range(i64 5, 9) i64 @fua_123_AND_XOR_a() {
+; CHECK-LABEL: fua_123_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB78_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test (cc == 1) && ((cc != 2) ^ (cc != 3))
+define noundef i64 @fua_123_AND_XOR_b() {
+; CHECK-LABEL: fua_123_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 8
+}
+
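+; The bar1a_* tests below branch on the same combined CC conditions and
+; conditionally call dummy(), checking that each combination folds into a
+; single conditional call.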
+; Test ((cc == 0) || (cc != 1)) && (cc != 2)
+define i64 @bar1a_012_OR_AND() {
+; CHECK-LABEL: bar1a_012_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnlh dummy at PLT
+; CHECK-NEXT:  .LBB80_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -1
+  %narrow = icmp ult i32 %2, 2
+  br i1 %narrow, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+declare void @dummy() local_unnamed_addr #1
+
+; Test ((cc == 0) && (cc != 1)) || (cc != 2)
+define i64 @bar1a_012_AND_OR_a() {
+; CHECK-LABEL: bar1a_012_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy at PLT
+; CHECK-NEXT:  .LBB81_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 2
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc != 1) || (cc != 2))
+define i64 @bar1a_012_AND_OR_b() {
+; CHECK-LABEL: bar1a_012_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB82_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) || (cc != 1)) && (cc != 3)
+define i64 @bar1a_013_OR_AND() {
+; CHECK-LABEL: bar1a_013_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jghe dummy at PLT
+; CHECK-NEXT:  .LBB83_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  br i1 %tobool.not.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc != 1)) || (cc != 3).
+define i64 @bar1a_013_XOR_AND_OR_a() {
+; CHECK-LABEL: bar1a_013_XOR_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB84_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc != 1) || (cc != 3))
+define i64 @bar1a_013_AND_OR_b() {
+; CHECK-LABEL: bar1a_013_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB85_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) || (cc != 2)) && (cc != 3)
+define i64 @bar1a_023_OR_AND() {
+; CHECK-LABEL: bar1a_023_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgle dummy at PLT
+; CHECK-NEXT:  .LBB86_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = or disjoint i32 %0, -4
+  %narrow = icmp samesign ugt i32 %2, -3
+  br i1 %narrow, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc != 2)) || (cc != 3)
+define i64 @bar1a_023_AND_OR_a() {
+; CHECK-LABEL: bar1a_023_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB87_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc != 2) || (cc != 3)).
+define i64 @bar1a_023_AND_OR_b() {
+; CHECK-LABEL: bar1a_023_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB88_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) || (cc != 2)) && (cc != 3)
+define i64 @bar1a_123_OR_AND() {
+; CHECK-LABEL: bar1a_123_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgle dummy at PLT
+; CHECK-NEXT:  .LBB89_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = or disjoint i32 %0, -4
+  %narrow = icmp samesign ugt i32 %2, -3
+  br i1 %narrow, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) && (cc != 2)) || (cc != 3).
+define i64 @bar1a_123_AND_OR_a() {
+; CHECK-LABEL: bar1a_123_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB90_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 1) && ((cc != 2) || (cc != 3))
+define i64 @bar1a_123_AND_OR_b() {
+; CHECK-LABEL: bar1a_123_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgl dummy at PLT
+; CHECK-NEXT:  .LBB91_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc != 1)) && (cc != 2)
+define i64 @bar1a_012_XOR_AND() {
+; CHECK-LABEL: bar1a_012_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgo dummy at PLT
+; CHECK-NEXT:  .LBB92_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %.not = icmp eq i32 %0, 3
+  br i1 %.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc != 1)) ^ (cc != 2)
+define i64 @bar1a_012_AND_XOR_a() {
+; CHECK-LABEL: bar1a_012_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnhe dummy at PLT
+; CHECK-NEXT:  .LBB93_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not = icmp eq i32 %2, 0
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc != 1)) && (cc != 3).
+define i64 @bar1a_012_AND_XOR_b() {
+; CHECK-LABEL: bar1a_012_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+define i64 @bar1a_013_XOR_AND() {
+; CHECK-LABEL: bar1a_013_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgh dummy at PLT
+; CHECK-NEXT:  .LBB95_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %switch = icmp eq i32 %0, 2
+  br i1 %switch, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %if.then
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc != 1) ^ (cc != 3))
+define i64 @bar1a_013_XOR_AND_XOR_a() {
+; CHECK-LABEL: bar1a_013_XOR_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jglh dummy at PLT
+; CHECK-NEXT:  .LBB96_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  switch i32 %0, label %if.then [
+    i32 3, label %if.end
+    i32 0, label %if.end
+  ]
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %entry, %if.then
+  ret i64 undef
+}
+
+; Test  (cc == 0) && ((cc != 1) ^ (cc != 3)).
+define i64 @bar1a_013_AND_XOR_b() {
+; CHECK-LABEL: bar1a_013_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc != 2)) ^ (cc != 3)
+define i64 @bar1a_023_XOR_AND() {
+; CHECK-LABEL: bar1a_023_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jglh dummy at PLT
+; CHECK-NEXT:  .LBB98_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  switch i32 %0, label %if.then [
+    i32 3, label %if.end
+    i32 0, label %if.end
+  ]
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %entry, %if.then
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc != 2)) ^ (cc != 3)
+define i64 @bar1a_023_AND_XOR_a() {
+; CHECK-LABEL: bar1a_023_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jglh dummy at PLT
+; CHECK-NEXT:  .LBB99_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  switch i32 %0, label %if.then [
+    i32 3, label %if.end
+    i32 0, label %if.end
+  ]
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %entry, %if.then
+  ret i64 undef
+}
+
+; Test  (cc == 0) && ((cc != 2) ^ (cc != 3)).
+define i64 @bar1a_023_AND_XOR_b() {
+; CHECK-LABEL: bar1a_023_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
+; Test ((cc == 1) ^ (cc != 2)) && (cc != 3).
+define i64 @bar1a_123_XOR_AND() {
+; CHECK-LABEL: bar1a_123_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB101_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %.not = icmp eq i32 %0, 0
+  br i1 %.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) && (cc != 2)) ^ (cc != 3)
+define i64 @bar1a_123_AND_XOR_a() {
+; CHECK-LABEL: bar1a_123_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jghe dummy at PLT
+; CHECK-NEXT:  .LBB102_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  br i1 %tobool.not.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 1) && ((cc != 2) ^ (cc != 3))
+define i64 @bar1a_123_AND_XOR_b() {
+; CHECK-LABEL: bar1a_123_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 undef
+}
+
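+; The fuaa_* tests below repeat the select-based combinations using equality
+; tests on CC (e.g. (cc == 0) || (cc == 1)) instead of mixed equality and
+; inequality tests.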
+; Test ((cc == 0) || (cc == 1)) && (cc != 2)
+define range(i64 5, 9) i64 @fuaa_012_OR_AND() {
+; CHECK-LABEL: fuaa_012_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB104_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %or.cond.i = icmp samesign ugt i32 %0, 1
+  %. = select i1 %or.cond.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) && (cc == 1)) || (cc != 2)
+define range(i64 5, 9) i64 @fuaa_012_AND_OR_a() {
+; CHECK-LABEL: fuaa_012_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB105_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 2
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) && ((cc == 1) || (cc != 2)).
+define range(i64 5, 9) i64 @fuaa_012_AND_OR_b() {
+; CHECK-LABEL: fuaa_012_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB106_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 0) || (cc == 1)) && (cc != 3)
+define range(i64 5, 9) i64 @fuaa_013_OR_AND() {
+; CHECK-LABEL: fuaa_013_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB107_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %or.cond.i = icmp samesign ugt i32 %0, 1
+  %. = select i1 %or.cond.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) && (cc == 1)) || (cc != 3)
+define range(i64 5, 9) i64 @fuaa_013_AND_OR_a() {
+; CHECK-LABEL: fuaa_013_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB108_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) && ((cc == 1) || (cc != 3))
+define range(i64 5, 9) i64 @fuaa_013_AND_OR_b() {
+; CHECK-LABEL: fuaa_013_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB109_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 0) || (cc == 2)) && (cc != 3)
+define range(i64 5, 9) i64 @fuaa_023_OR_AND() {
+; CHECK-LABEL: fuaa_023_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB110_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 0) && (cc == 2)) || (cc != 3)
+define range(i64 5, 9) i64 @fuaa_023_AND_OR_a() {
+; CHECK-LABEL: fuaa_023_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB111_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) && ((cc == 2) || (cc != 3))
+define range(i64 5, 9) i64 @fuaa_023_AND_OR_b() {
+; CHECK-LABEL: fuaa_023_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB112_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 1) || (cc == 2)) && (cc != 3)
+define range(i64 5, 9) i64 @fuaa_123_OR_AND() {
+; CHECK-LABEL: fuaa_123_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB113_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -3
+  %or.cond.i = icmp ult i32 %2, -2
+  %. = select i1 %or.cond.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 1) && (cc == 2)) || (cc != 3)
+define range(i64 5, 9) i64 @fuaa_123_AND_OR_a() {
+; CHECK-LABEL: fuaa_123_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB114_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 1) && ((cc == 2) || (cc != 3))
+define range(i64 5, 9) i64 @fuaa_123_AND_OR_b() {
+; CHECK-LABEL: fuaa_123_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB115_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test  ((cc == 0) ^ (cc == 1)) && (cc != 2).
+define range(i64 5, 9) i64 @fuaa_012_XOR_AND() {
+; CHECK-LABEL: fuaa_012_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB116_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor7.i = icmp samesign ugt i32 %0, 1
+  %. = select i1 %xor7.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) && (cc == 1)) ^ (cc != 2)
+define range(i64 5, 9) i64 @fuaa_012_AND_XOR_a() {
+; CHECK-LABEL: fuaa_012_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB117_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 2
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) && ((cc == 1) ^ (cc != 2))
+define range(i64 5, 9) i64 @fuaa_012_AND_XOR_b() {
+; CHECK-LABEL: fuaa_012_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB118_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 0) ^ (cc == 1)) && (cc != 3)
+define range(i64 5, 9) i64 @fuaa_013_XOR_AND() {
+; CHECK-LABEL: fuaa_013_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB119_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor7.i = icmp samesign ugt i32 %0, 1
+  %. = select i1 %xor7.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) && (cc == 1)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fuaa_013_XOR_AND_XOR_a() {
+; CHECK-LABEL: fuaa_013_XOR_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB120_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) && ((cc == 1) ^ (cc != 3))
+define range(i64 5, 9) i64 @fuaa_013_AND_XOR_b() {
+; CHECK-LABEL: fuaa_013_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB121_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 0) && (cc == 2)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fuaa_023_XOR_AND() {
+; CHECK-LABEL: fuaa_023_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB122_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) && (cc == 2)) ^ (cc != 3).
+define range(i64 5, 9) i64 @fuaa_023_AND_XOR_a() {
+; CHECK-LABEL: fuaa_023_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB123_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) && ((cc == 2) ^ (cc != 3)).
+define range(i64 5, 9) i64 @fuaa_023_AND_XOR_b() {
+; CHECK-LABEL: fuaa_023_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB124_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 1) ^ (cc == 2)) && (cc != 3)
+define range(i64 5, 9) i64 @fuaa_123_XOR_AND() {
+; CHECK-LABEL: fuaa_123_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB125_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -3
+  %xor7.i = icmp ult i32 %2, -2
+  %. = select i1 %xor7.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 1) && (cc == 2)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fuaa_123_AND_XOR_a() {
+; CHECK-LABEL: fuaa_123_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB126_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 1) && ((cc == 2) ^ (cc != 3))
+define range(i64 5, 9) i64 @fuaa_123_AND_XOR_b() {
+; CHECK-LABEL: fuaa_123_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB127_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 0) || (cc == 1)) && (cc != 2)
+define i64 @bar2a_012_OR_AND() {
+; CHECK-LABEL: bar2a_012_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgle dummy at PLT
+; CHECK-NEXT:  .LBB128_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %or.cond.i = icmp samesign ugt i32 %0, 1
+  br i1 %or.cond.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc == 1)) || (cc != 2)
+define i64 @bar2a_012_AND_OR_a() {
+; CHECK-LABEL: bar2a_012_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy at PLT
+; CHECK-NEXT:  .LBB129_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 2
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc == 1) || (cc != 2)).
+define i64 @bar2a_012_AND_OR_b() {
+; CHECK-LABEL: bar2a_012_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB130_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) || (cc == 1)) && (cc != 3)
+define i64 @bar2a_013_OR_AND() {
+; CHECK-LABEL: bar2a_013_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgle dummy at PLT
+; CHECK-NEXT:  .LBB131_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %or.cond.i = icmp samesign ugt i32 %0, 1
+  br i1 %or.cond.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc == 1)) || (cc != 3)
+define i64 @bar2a_013_XOR_AND_OR_a() {
+; CHECK-LABEL: bar2a_013_XOR_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB132_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc == 1) || (cc != 3))
+define i64 @bar2a_013_AND_OR_b() {
+; CHECK-LABEL: bar2a_013_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB133_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) || (cc == 2)) && (cc != 3)
+define i64 @bar2a_023_OR_AND() {
+; CHECK-LABEL: bar2a_023_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jghe dummy at PLT
+; CHECK-NEXT:  .LBB134_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  br i1 %tobool.not.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc == 2)) || (cc != 3)
+define i64 @bar2a_023_AND_OR_a() {
+; CHECK-LABEL: bar2a_023_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB135_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc == 2) || (cc != 3))
+define i64 @bar2a_023_AND_OR_b() {
+; CHECK-LABEL: bar2a_023_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB136_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) || (cc == 2)) && (cc != 3)
+define i64 @bar2a_123_OR_AND() {
+; CHECK-LABEL: bar2a_123_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jglh dummy at PLT
+; CHECK-NEXT:  .LBB137_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -3
+  %or.cond.i = icmp ult i32 %2, -2
+  br i1 %or.cond.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) && (cc == 2)) || (cc != 3)
+define i64 @bar2a_123_AND_OR_a() {
+; CHECK-LABEL: bar2a_123_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB138_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 1) && ((cc == 2) || (cc != 3))
+define i64 @bar2a_123_AND_OR_b() {
+; CHECK-LABEL: bar2a_123_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgl dummy at PLT
+; CHECK-NEXT:  .LBB139_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test  ((cc == 0) ^ (cc == 1)) && (cc != 2).
+define i64 @bar2a_012_XOR_AND() {
+; CHECK-LABEL: bar2a_012_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgle dummy at PLT
+; CHECK-NEXT:  .LBB140_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor7.i = icmp samesign ugt i32 %0, 1
+  br i1 %xor7.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc == 1)) ^ (cc != 2)
+define i64 @bar2a_012_AND_XOR_a() {
+; CHECK-LABEL: bar2a_012_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy at PLT
+; CHECK-NEXT:  .LBB141_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 2
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc == 1) ^ (cc != 2))
+define i64 @bar2a_012_AND_XOR_b() {
+; CHECK-LABEL: bar2a_012_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB142_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc == 1)) && (cc != 3)
+define i64 @bar2a_013_XOR_AND() {
+; CHECK-LABEL: bar2a_013_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgle dummy at PLT
+; CHECK-NEXT:  .LBB143_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor7.i = icmp samesign ugt i32 %0, 1
+  br i1 %xor7.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc == 1)) ^ (cc != 3)
+define i64 @bar2a_013_XOR_AND_XOR_a() {
+; CHECK-LABEL: bar2a_013_XOR_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB144_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc == 1) ^ (cc != 3))
+define i64 @bar2a_013_AND_XOR_b() {
+; CHECK-LABEL: bar2a_013_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB145_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (((cc == 0) ^ (cc != 2)) && (cc != 3))
+define i64 @bar2a_023_XOR_AND() {
+; CHECK-LABEL: bar2a_023_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB146_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc == 2)) ^ (cc != 3)
+define i64 @bar2a_023_AND_XOR_a() {
+; CHECK-LABEL: bar2a_023_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB147_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc == 2) ^ (cc != 3)).
+define i64 @bar2a_023_AND_XOR_b() {
+; CHECK-LABEL: bar2a_023_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB148_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) ^ (cc == 2)) && (cc != 3)
+define i64 @bar2a_123_XOR_AND() {
+; CHECK-LABEL: bar2a_123_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jglh dummy at PLT
+; CHECK-NEXT:  .LBB149_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -3
+  %xor7.i = icmp ult i32 %2, -2
+  br i1 %xor7.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) && (cc == 2)) ^ (cc != 3)
+define i64 @bar2a_123_AND_XOR_a() {
+; CHECK-LABEL: bar2a_123_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB150_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 1) && ((cc == 2) ^ (cc != 3))
+define i64 @bar2a_123_AND_XOR_b() {
+; CHECK-LABEL: bar2a_123_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgl dummy at PLT
+; CHECK-NEXT:  .LBB151_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) || ((cc != 1) ^ (cc != 2))
+define range(i64 5, 9) i64 @fu1a_012_OR_XOR_a() {
+; CHECK-LABEL: fu1a_012_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB152_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor6.i.not = icmp eq i32 %0, 3
+  %. = select i1 %xor6.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) || (cc != 1)) ^ (cc != 2)
+define range(i64 5, 9) i64 @fu1a_012_OR_XOR_c() {
+; CHECK-LABEL: fu1a_012_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB153_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -3
+  %xor5.i = icmp ult i32 %2, -2
+  %. = select i1 %xor5.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) ^ ((cc != 1) || (cc != 2))
+define range(i64 5, 9) i64 @fu1a_012_XOR_OR_a() {
+; CHECK-LABEL: fu1a_012_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB154_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) ^ (cc != 1)) || (cc != 2)
+define noundef range(i64 5, 9) i64 @fu1a_012_XOR_OR_c() {
+; CHECK-LABEL: fu1a_012_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test (cc == 0) || ((cc != 1) ^ (cc != 3))
+define range(i64 5, 9) i64 @fu1a_013_OR_XOR_a() {
+; CHECK-LABEL: fu1a_013_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB156_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp ne i32 %0, 0
+  %2 = and i32 %0, 1
+  %xor6.i.not = icmp eq i32 %2, 0
+  %narrow.not = and i1 %cmp.i, %xor6.i.not
+  %. = select i1 %narrow.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) || (cc != 1)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fu1a_013_OR_XOR_c() {
+; CHECK-LABEL: fu1a_013_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB157_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not.not.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test  (cc == 0) ^ ((cc != 1) || (cc != 3))
+define range(i64 5, 9) i64 @fu1a_013_XOR_OR_a() {
+; CHECK-LABEL: fu1a_013_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB158_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) ^ (cc != 1)) || (cc != 3)
+define noundef range(i64 5, 9) i64 @fu1a_013_XOR_OR_c() {
+; CHECK-LABEL: fu1a_013_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test (cc == 0) || ((cc != 2) ^ (cc != 3))
+define range(i64 5, 9) i64 @fu1a_023_OR_XOR_a() {
+; CHECK-LABEL: fu1a_023_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB160_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor6.i.not = icmp eq i32 %0, 1
+  %. = select i1 %xor6.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) || (cc != 2)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fu1a_023_OR_XOR_c() {
+; CHECK-LABEL: fu1a_023_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB161_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5.i = icmp samesign ult i32 %0, 2
+  %. = select i1 %xor5.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) ^ ((cc != 2) || (cc != 3))
+define range(i64 5, 9) i64 @fu1a_023_XOR_OR_a() {
+; CHECK-LABEL: fu1a_023_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB162_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) ^ (cc != 2)) || (cc != 3)
+define noundef range(i64 5, 9) i64 @fu1a_023_XOR_OR_c() {
+; CHECK-LABEL: fu1a_023_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test (cc == 1) || ((cc != 2) ^ (cc != 3))
+define range(i64 5, 9) i64 @fu1a_123_OR_XOR_a() {
+; CHECK-LABEL: fu1a_123_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB164_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor6.i.not = icmp eq i32 %0, 0
+  %. = select i1 %xor6.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 1) || (cc != 2)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fu1a_123_OR_XOR_c() {
+; CHECK-LABEL: fu1a_123_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB165_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5.i = icmp samesign ult i32 %0, 2
+  %. = select i1 %xor5.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 1) ^ ((cc != 2) || (cc != 3))
+define range(i64 5, 9) i64 @fu1a_123_XOR_OR_a() {
+; CHECK-LABEL: fu1a_123_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB166_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  %. = select i1 %cmp.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 1) ^ (cc != 2)) || (cc != 3)
+define noundef range(i64 5, 9) i64 @fu1a_123_XOR_OR_c() {
+; CHECK-LABEL: fu1a_123_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test (cc == 0) || ((cc != 1) ^ (cc != 2))
+define i64 @bar3a_012_OR_XOR_a() {
+; CHECK-LABEL: bar3a_012_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB168_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor6.i.not = icmp eq i32 %0, 3
+  br i1 %xor6.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) || (cc != 1)) ^ (cc != 2)
+define i64 @bar3a_012_OR_XOR_c() {
+; CHECK-LABEL: bar3a_012_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jglh dummy at PLT
+; CHECK-NEXT:  .LBB169_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -3
+  %xor5.i = icmp ult i32 %2, -2
+  br i1 %xor5.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) ^ ((cc != 1) || (cc != 2))
+define i64 @bar3a_012_XOR_OR_a() {
+; CHECK-LABEL: bar3a_012_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy at PLT
+; CHECK-NEXT:  .LBB170_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc != 1)) || (cc != 2)
+define i64 @bar3a_012_XOR_OR_c() {
+; CHECK-LABEL: bar3a_012_XOR_OR_c:
+; CHECK:       # %bb.0: # %if.end
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+if.end:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret i64 undef
+}
+
+; Test (cc == 0) || ((cc != 1) ^ (cc != 3))
+define i64 @bar3a_013_OR_XOR_a() {
+; CHECK-LABEL: bar3a_013_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy at PLT
+; CHECK-NEXT:  .LBB172_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp ne i32 %0, 0
+  %2 = and i32 %0, 1
+  %xor6.i.not = icmp eq i32 %2, 0
+  %narrow.not = and i1 %cmp.i, %xor6.i.not
+  br i1 %narrow.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) || (cc != 1)) ^ (cc != 3)
+define i64 @bar3a_013_OR_XOR_c() {
+; CHECK-LABEL: bar3a_013_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnhe dummy at PLT
+; CHECK-NEXT:  .LBB173_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not.not = icmp eq i32 %2, 0
+  br i1 %tobool.not.not.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test  (cc == 0) ^ ((cc != 1) || (cc != 3))
+define i64 @bar3a_013_XOR_OR_a() {
+; CHECK-LABEL: bar3a_013_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy at PLT
+; CHECK-NEXT:  .LBB174_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc != 1)) || (cc != 3)
+define i64 @bar3a_013_XOR_OR_c() {
+; CHECK-LABEL: bar3a_013_XOR_OR_c:
+; CHECK:       # %bb.0: # %if.end
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+if.end:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret i64 undef
+}
+
+; Test (cc == 0) || ((cc != 2) ^ (cc != 3))
+define i64 @bar3a_023_OR_XOR_a() {
+; CHECK-LABEL: bar3a_023_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnl dummy at PLT
+; CHECK-NEXT:  .LBB176_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor6.i.not = icmp eq i32 %0, 1
+  br i1 %xor6.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) || (cc != 2)) ^ (cc != 3)
+define i64 @bar3a_023_OR_XOR_c() {
+; CHECK-LABEL: bar3a_023_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnle dummy at PLT
+; CHECK-NEXT:  .LBB177_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5.i = icmp samesign ult i32 %0, 2
+  br i1 %xor5.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) ^ ((cc != 2) || (cc != 3))
+define i64 @bar3a_023_XOR_OR_a() {
+; CHECK-LABEL: bar3a_023_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy at PLT
+; CHECK-NEXT:  .LBB178_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc != 2)) || (cc != 3)
+define i64 @bar3a_023_XOR_OR_c() {
+; CHECK-LABEL: bar3a_023_XOR_OR_c:
+; CHECK:       # %bb.0: # %if.end
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+if.end:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret i64 undef
+}
+
+; Test (cc == 1) || ((cc != 2) ^ (cc != 3))
+define i64 @bar3a_123_OR_XOR_a() {
+; CHECK-LABEL: bar3a_123_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy at PLT
+; CHECK-NEXT:  .LBB180_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor6.i.not = icmp eq i32 %0, 0
+  br i1 %xor6.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) || (cc != 2)) ^ (cc != 3)
+define i64 @bar3a_123_OR_XOR_c() {
+; CHECK-LABEL: bar3a_123_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnle dummy at PLT
+; CHECK-NEXT:  .LBB181_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5.i = icmp samesign ult i32 %0, 2
+  br i1 %xor5.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 1) ^ ((cc != 2) || (cc != 3))
+define i64 @bar3a_123_XOR_OR_a() {
+; CHECK-LABEL: bar3a_123_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnl dummy at PLT
+; CHECK-NEXT:  .LBB182_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  br i1 %cmp.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) ^ (cc != 2)) || (cc != 3)
+define i64 @bar3a_123_XOR_OR_c() {
+; CHECK-LABEL: bar3a_123_XOR_OR_c:
+; CHECK:       # %bb.0: # %if.end
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+if.end:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret i64 undef
+}
+
+; Test (cc == 0) || ((cc == 1) ^ (cc != 2))
+define range(i64 5, 9) i64 @fu2a_012_OR_XOR_a() {
+; CHECK-LABEL: fu2a_012_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB184_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -1
+  %xor6.i = icmp ult i32 %2, 2
+  %. = select i1 %xor6.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) || (cc == 1)) ^ (cc != 2)
+define range(i64 5, 9) i64 @fu2a_012_OR_XOR_c() {
+; CHECK-LABEL: fu2a_012_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB185_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5.i.not = icmp eq i32 %0, 3
+  %. = select i1 %xor5.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test (cc == 0) ^ ((cc == 1) || (cc != 2))
+define range(i64 5, 9) i64 @fu2a_012_XOR_OR_a() {
+; CHECK-LABEL: fu2a_012_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB186_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) ^ (cc == 1)) || (cc != 2)
+define range(i64 5, 9) i64 @fu2a_012_XOR_OR_c() {
+; CHECK-LABEL: fu2a_012_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB187_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.i.not = icmp eq i32 %0, 2
+  %. = select i1 %cmp3.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) || ((cc == 1) ^ (cc != 3))
+define range(i64 5, 9) i64 @fu2a_013_OR_XOR_a() {
+; CHECK-LABEL: fu2a_013_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB188_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 0) || (cc == 1)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fu2a_013_OR_XOR_c() {
+; CHECK-LABEL: fu2a_013_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnhr %r14
+; CHECK-NEXT:  .LBB189_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp samesign ult i32 %0, 2
+  %cmp2.i = icmp eq i32 %0, 3
+  %xor5.i.not = or i1 %2, %cmp2.i
+  %. = select i1 %xor5.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) ^ ((cc == 1) || (cc != 3))
+define range(i64 5, 9) i64 @fu2a_013_XOR_OR_a() {
+; CHECK-LABEL: fu2a_013_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB190_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp eq i32 %0, 0
+  %cmp3.i = icmp eq i32 %0, 3
+  %xor7.i.not = or i1 %cmp.i, %cmp3.i
+  %. = select i1 %xor7.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) ^ (cc == 1)) || (cc != 3)
+define range(i64 5, 9) i64 @fu2a_013_XOR_OR_c() {
+; CHECK-LABEL: fu2a_013_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB191_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp3.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) || ((cc == 2) ^ (cc != 3))
+define range(i64 5, 9) i64 @fu2a_023_OR_XOR_a() {
+; CHECK-LABEL: fu2a_023_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB192_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = or disjoint i32 %0, -4
+  %xor6.i = icmp samesign ugt i32 %2, -3
+  %. = select i1 %xor6.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) || (cc == 2)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fu2a_023_OR_XOR_c() {
+; CHECK-LABEL: fu2a_023_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB193_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = trunc i32 %0 to i1
+  %3 = icmp eq i32 %0, 3
+  %tobool.not.not = xor i1 %3, %2
+  %. = select i1 %tobool.not.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test (cc == 0) ^ ((cc == 2) || (cc != 3))
+define range(i64 5, 9) i64 @fu2a_023_XOR_OR_a() {
+; CHECK-LABEL: fu2a_023_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB194_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp eq i32 %0, 0
+  %cmp3.i = icmp eq i32 %0, 3
+  %xor7.i.not = or i1 %cmp.i, %cmp3.i
+  %. = select i1 %xor7.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 0) ^ (cc == 2)) || (cc != 3)
+define range(i64 5, 9) i64 @fu2a_023_XOR_OR_c() {
+; CHECK-LABEL: fu2a_023_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB195_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp3.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 1) || ((cc == 2) ^ (cc != 3))
+define range(i64 5, 9) i64 @fu2a_123_OR_XOR_a() {
+; CHECK-LABEL: fu2a_123_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB196_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = or disjoint i32 %0, -4
+  %xor6.i = icmp samesign ugt i32 %2, -3
+  %. = select i1 %xor6.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc == 1) || (cc == 2)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fu2a_123_OR_XOR_c() {
+; CHECK-LABEL: fu2a_123_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB197_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5.i.not = icmp eq i32 %0, 0
+  %. = select i1 %xor5.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test (cc == 1) ^ ((cc == 2) || (cc != 3))
+define range(i64 5, 9) i64 @fu2a_123_XOR_OR_a() {
+; CHECK-LABEL: fu2a_123_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB198_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc == 1) ^ (cc == 2)) || (cc != 3)
+define range(i64 5, 9) i64 @fu2a_123_XOR_OR_c() {
+; CHECK-LABEL: fu2a_123_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB199_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp3.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc == 0) || ((cc == 1) ^ (cc != 2))
+define i64 @bar4a_012_OR_XOR_a() {
+; CHECK-LABEL: bar4a_012_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnlh dummy at PLT
+; CHECK-NEXT:  .LBB200_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -1
+  %xor6.i = icmp ult i32 %2, 2
+  br i1 %xor6.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) || (cc == 1)) ^ (cc != 2)
+define i64 @bar4a_012_OR_XOR_c() {
+; CHECK-LABEL: bar4a_012_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgo dummy at PLT
+; CHECK-NEXT:  .LBB201_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5.i.not = icmp eq i32 %0, 3
+  br i1 %xor5.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) ^ ((cc == 1) || (cc != 2))
+define i64 @bar4a_012_XOR_OR_a() {
+; CHECK-LABEL: bar4a_012_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnhe dummy at PLT
+; CHECK-NEXT:  .LBB202_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not = icmp eq i32 %2, 0
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc == 1)) || (cc != 2)
+define i64 @bar4a_012_XOR_OR_c() {
+; CHECK-LABEL: bar4a_012_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy at PLT
+; CHECK-NEXT:  .LBB203_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.i.not = icmp eq i32 %0, 2
+  br i1 %cmp3.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) || ((cc == 1) ^ (cc != 3))
+define i64 @bar4a_013_OR_XOR_a() {
+; CHECK-LABEL: bar4a_013_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jghe dummy at PLT
+; CHECK-NEXT:  .LBB204_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  br i1 %tobool.not.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) || (cc == 1)) ^ (cc != 3)
+define i64 @bar4a_013_OR_XOR_c() {
+; CHECK-LABEL: bar4a_013_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgh dummy at PLT
+; CHECK-NEXT:  .LBB205_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %switch = icmp eq i32 %0, 2
+  br i1 %switch, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %if.then
+  ret i64 undef
+}
+
+; Test (cc == 0) ^ ((cc == 1) || (cc != 3))
+define i64 @bar4a_013_XOR_OR_a() {
+; CHECK-LABEL: bar4a_013_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jglh dummy at PLT
+; CHECK-NEXT:  .LBB206_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  switch i32 %0, label %if.then [
+    i32 3, label %if.end
+    i32 0, label %if.end
+  ]
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %entry, %if.then
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc == 1)) || (cc != 3)
+define i64 @bar4a_013_XOR_OR_c() {
+; CHECK-LABEL: bar4a_013_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB207_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.i.not = icmp eq i32 %0, 3
+  br i1 %cmp3.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) || ((cc == 2) ^ (cc != 3))
+define i64 @bar4a_023_OR_XOR_a() {
+; CHECK-LABEL: bar4a_023_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgle dummy at PLT
+; CHECK-NEXT:  .LBB208_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = or disjoint i32 %0, -4
+  %xor6.i = icmp samesign ugt i32 %2, -3
+  br i1 %xor6.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) || (cc == 2)) ^ (cc != 3)
+define i64 @bar4a_023_OR_XOR_c() {
+; CHECK-LABEL: bar4a_023_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgl dummy at PLT
+; CHECK-NEXT:  .LBB209_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = trunc i32 %0 to i1
+  %3 = icmp eq i32 %0, 3
+  %tobool.not.not = xor i1 %3, %2
+  br i1 %tobool.not.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) ^ ((cc == 2) || (cc != 3))
+define i64 @bar4a_023_XOR_OR_a() {
+; CHECK-LABEL: bar4a_023_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jglh dummy at PLT
+; CHECK-NEXT:  .LBB210_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  switch i32 %0, label %if.then [
+    i32 3, label %if.end
+    i32 0, label %if.end
+  ]
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %entry, %if.then
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc == 2)) || (cc != 3)
+define i64 @bar4a_023_XOR_OR_c() {
+; CHECK-LABEL: bar4a_023_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB211_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.i.not = icmp eq i32 %0, 3
+  br i1 %cmp3.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 1) || ((cc == 2) ^ (cc != 3))
+define i64 @bar4a_123_OR_XOR_a() {
+; CHECK-LABEL: bar4a_123_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgle dummy at PLT
+; CHECK-NEXT:  .LBB212_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = or disjoint i32 %0, -4
+  %xor6.i = icmp samesign ugt i32 %2, -3
+  br i1 %xor6.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) || (cc == 2)) ^ (cc != 3)
+define i64 @bar4a_123_OR_XOR_c() {
+; CHECK-LABEL: bar4a_123_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB213_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5.i.not = icmp eq i32 %0, 0
+  br i1 %xor5.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 1) ^ ((cc == 2) || (cc != 3))
+define i64 @bar4a_123_XOR_OR_a() {
+; CHECK-LABEL: bar4a_123_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jghe dummy at PLT
+; CHECK-NEXT:  .LBB214_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  br i1 %tobool.not.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) ^ (cc == 2)) || (cc != 3)
+define i64 @bar4a_123_XOR_OR_c() {
+; CHECK-LABEL: bar4a_123_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB215_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.i.not = icmp eq i32 %0, 3
+  br i1 %cmp3.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_not.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_not.ll
new file mode 100644
index 0000000000000..84b8858afc8ad
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_not.ll
@@ -0,0 +1,2543 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; Test Flag Output Operands with 14 combinations of CCMASK and optimizations.
+; This tests mixing XOR with OR, XOR with AND, and OR with AND, with
+; different ways of parenthesizing with the != operator.
+
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O3 | FileCheck %s
+
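+; For reference, the functions below correspond to C source of roughly the
+; following shape (a hypothetical sketch; the exact C sources are not part
+; of this patch). The "=@cc" flag output operand yields the condition code
+; as an integer in the range [0, 3], which the icmp ult/llvm.assume pair in
+; each test encodes:
+;
+;   int bar_012_OR_XOR(int x) {
+;     int cc;
+;     asm volatile("ahi %0,42\n" : "=d"(x), "=@cc"(cc) : "0"(x));
+;     return (((cc != 0) || (cc != 1)) ^ (cc != 2)) ? 42 : 0;
+;   }
+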
+; Test ((cc != 0) || (cc != 1)) ^ (cc != 2).
+define signext range(i32 0, 43) i32 @bar_012_OR_XOR(i32 noundef signext %x) {
+; CHECK-LABEL: bar_012_OR_XOR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB0_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3 = icmp eq i32 %asmresult1, 2
+  %cond = select i1 %cmp3, i32 42, i32 0
+  ret i32 %cond
+}
+
+declare void @llvm.assume(i1 noundef) #1
+
+; Test ((cc != 0) || (cc != 1)) ^ (cc != 3).
+define signext range(i32 0, 43) i32 @bar_013_OR_XOR(i32 noundef signext %x) {
+; CHECK-LABEL: bar_013_OR_XOR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB1_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3 = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test ((cc != 0) || (cc != 2)) ^ (cc != 3).
+define signext range(i32 0, 43) i32 @bar_023_OR_XOR(i32 noundef signext %x) {
+; CHECK-LABEL: bar_023_OR_XOR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB2_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3 = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test ((cc != 1) || (cc != 2)) ^ (cc != 3).
+define signext range(i32 0, 43) i32 @bar_123_OR_XOR(i32 noundef signext %x) {
+; CHECK-LABEL: bar_123_OR_XOR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB3_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3 = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test ((cc != 0) ^ (cc != 1)) || (cc != 2)
+define signext range(i32 0, 43) i32 @foo_012_XOR_OR(i32 noundef signext %x) {
+; CHECK-LABEL: foo_012_XOR_OR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB4_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp4.not = icmp eq i32 %asmresult1, 2
+  %cond = select i1 %cmp4.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test ((cc != 0) ^ (cc != 1)) || (cc != 3)
+define signext range(i32 0, 43) i32 @foo_013_XOR_OR(i32 noundef signext %x) {
+; CHECK-LABEL: foo_013_XOR_OR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB5_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp4.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp4.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test ((cc != 0) ^ (cc != 2)) || (cc != 3)
+define signext range(i32 0, 43) i32 @foo_023_XOR_OR(i32 noundef signext %x) {
+; CHECK-LABEL: foo_023_XOR_OR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB6_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp4.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp4.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test ((cc != 1) ^ (cc != 2)) || (cc != 3)
+define signext range(i32 0, 43) i32 @foo_123_XOR_OR(i32 noundef signext %x) {
+; CHECK-LABEL: foo_123_XOR_OR:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB7_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp4.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp4.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc != 0) || (cc != 1)) && (cc != 2))
+define signext range(i32 0, 43) i32 @bar_012_OR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar_012_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB8_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 2
+  %cond = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc != 0) || (cc != 1)) && (cc != 3))
+define signext range(i32 0, 43) i32 @bar_013_OR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar_013_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB9_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc != 0) || (cc != 2)) && (cc != 3))
+define signext range(i32 0, 43) i32 @bar_023_OR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar_023_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB10_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc != 1) || (cc != 2)) && (cc != 3))
+define signext range(i32 0, 43) i32 @bar_123_OR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar_123_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB11_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp3.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc != 0) ^ (cc != 1)) && (cc != 2))
+define signext range(i32 0, 43) i32 @bar_012_XOR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar_012_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB12_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor8 = icmp samesign ult i32 %asmresult1, 2
+  %cond = select i1 %xor8, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc != 0) ^ (cc != 1)) && (cc != 3))
+define signext range(i32 0, 43) i32 @bar_013_XOR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar_013_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB13_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor8 = icmp samesign ult i32 %asmresult1, 2
+  %cond = select i1 %xor8, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc != 0) ^ (cc != 2)) && (cc != 3))
+define signext range(i32 0, 43) i32 @bar_023_XOR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar_023_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB14_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp ne i32 %asmresult1, 0
+  %cmp2 = icmp ne i32 %asmresult1, 2
+  %xor8 = xor i1 %cmp, %cmp2
+  %cond = select i1 %xor8, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc != 1) ^ (cc != 2)) && (cc != 3))
+define signext range(i32 0, 43) i32 @bar_123_XOR_AND(i32 noundef signext %x) {
+; CHECK-LABEL: bar_123_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB15_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -1
+  %xor8 = icmp ult i32 %2, 2
+  %cond = select i1 %xor8, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc != 0) && (cc != 1)) ^ (cc != 2)).
+define signext range(i32 0, 43) i32 @bar_012_AND_XOR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar_012_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB16_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor6.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %xor6.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc != 0) && (cc != 1)) ^ (cc != 3)).
+define signext range(i32 0, 43) i32 @bar_013_AND_XOR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar_013_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnhr %r14
+; CHECK-NEXT:  .LBB17_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp samesign ugt i32 %asmresult1, 1
+  %cmp3 = icmp ne i32 %asmresult1, 3
+  %xor6 = xor i1 %2, %cmp3
+  %cond = select i1 %xor6, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (((cc != 0) && (cc != 2)) ^ (cc != 3)).
+define signext range(i32 0, 43) i32 @bar_023_AND_XOR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar_023_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB18_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = trunc i32 %asmresult1 to i1
+  %3 = icmp eq i32 %asmresult1, 3
+  %tobool.not = xor i1 %3, %2
+  %cond = select i1 %tobool.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (((cc != 1) && (cc != 2)) ^ (cc != 3)).
+define signext range(i32 0, 43) i32 @bar_123_AND_XOR_a(i32 noundef signext %x) {
+; CHECK-LABEL: bar_123_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bner %r14
+; CHECK-NEXT:  .LBB19_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -3
+  %3 = icmp ult i32 %2, -2
+  %cmp3 = icmp ne i32 %asmresult1, 3
+  %xor6 = xor i1 %cmp3, %3
+  %cond = select i1 %xor6, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test (cc != 0) && ((cc != 1) ^ (cc != 2))
+define signext range(i32 0, 43) i32 @bar_012_AND_XOR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar_012_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB20_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not = icmp eq i32 %asmresult1, 0
+  %xor7.not = icmp eq i32 %asmresult1, 3
+  %2 = or i1 %cmp.not, %xor7.not
+  %cond = select i1 %2, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (cc != 0) && ((cc != 1) ^ (cc != 3))
+define signext range(i32 0, 43) i32 @bar_013_AND_XOR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar_013_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnher %r14
+; CHECK-NEXT:  .LBB21_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2 = icmp ne i32 %asmresult1, 1
+  %cmp3 = icmp ne i32 %asmresult1, 3
+  %xor7 = xor i1 %cmp2, %cmp3
+  %2 = select i1 %xor7, i32 42, i32 0
+  ret i32 %2
+}
+
+; Test (cc != 0) && ((cc != 2) ^ (cc != 3))
+define signext range(i32 0, 43) i32 @bar_023_AND_XOR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar_023_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB22_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp samesign ult i32 %asmresult1, 2
+  %cond = select i1 %2, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test (cc != 1) && ((cc != 2) ^ (cc != 3))
+define signext range(i32 0, 43) i32 @bar_123_AND_XOR_b(i32 noundef signext %x) {
+; CHECK-LABEL: bar_123_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB23_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp samesign ult i32 %asmresult1, 2
+  %cond = select i1 %2, i32 0, i32 42
+  ret i32 %cond
+}
+
+
+ at a = global i32 0, align 4
+
+; Test ((cc != 0) || (cc != 1)) && (cc != 2)
+define range(i64 5, 9) i64 @fu_012_OR_AND() {
+; CHECK-LABEL: fu_012_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB24_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 2
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc != 0) && (cc != 1)) || (cc != 2)
+define noundef range(i64 5, 9) i64 @fu_012_AND_OR_a() {
+; CHECK-LABEL: fu_012_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test (cc != 0) && ((cc != 1) || (cc != 2))
+define range(i64 5, 9) i64 @fu_012_AND_OR_b() {
+; CHECK-LABEL: fu_012_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB26_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.not.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc != 0) || (cc != 1)) && (cc != 3).
+define range(i64 5, 9) i64 @fu_013_OR_AND() {
+; CHECK-LABEL: fu_013_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB27_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc != 0) && (cc != 1)) || (cc != 3)
+define noundef range(i64 5, 9) i64 @fu_013_AND_OR_a() {
+; CHECK-LABEL: fu_013_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test (cc != 0) && ((cc != 1) || (cc != 3))
+define range(i64 5, 9) i64 @fu_013_AND_OR_b() {
+; CHECK-LABEL: fu_013_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB29_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.not.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test  ((cc != 0) || (cc != 2)) && (cc != 3)
+define range(i64 5, 9) i64 @fu_023_OR_AND() {
+; CHECK-LABEL: fu_023_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB30_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; ((cc != 0) && (cc != 2)) || (cc != 3)
+define noundef range(i64 5, 9) i64 @fu_023_AND_OR_a() {
+; CHECK-LABEL: fu_023_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test (cc != 0) && ((cc != 2) || (cc != 3)).
+define range(i64 5, 9) i64 @fu_023_AND_OR_b() {
+; CHECK-LABEL: fu_023_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB32_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.not.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc != 1) || (cc != 2)) && (cc != 3)
+define range(i64 5, 9) i64 @fu_123_OR_AND() {
+; CHECK-LABEL: fu_123_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB33_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc != 1) && (cc != 2)) || (cc != 3)
+define noundef range(i64 5, 9) i64 @fu_123_AND_OR_a() {
+; CHECK-LABEL: fu_123_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test (cc != 1) && ((cc != 2) || (cc != 3))
+define range(i64 5, 9) i64 @fu_123_AND_OR_b() {
+; CHECK-LABEL: fu_123_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB35_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i.not = icmp eq i32 %0, 1
+  %. = select i1 %cmp.not.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc != 0) ^ (cc != 1)) && (cc != 2)
+define range(i64 5, 9) i64 @fu_012_XOR_AND() {
+; CHECK-LABEL: fu_012_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB36_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor7.i = icmp samesign ugt i32 %0, 1
+  %. = select i1 %xor7.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc != 0) && (cc != 1)) ^ (cc != 2)
+define range(i64 5, 9) i64 @fu_012_AND_XOR_a() {
+; CHECK-LABEL: fu_012_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB37_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5.i.not = icmp eq i32 %0, 3
+  %. = select i1 %xor5.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test  (cc != 0) && ((cc != 1) ^ (cc != 2))
+define range(i64 5, 9) i64 @fu_012_AND_XOR_b() {
+; CHECK-LABEL: fu_012_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB38_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i = icmp eq i32 %0, 0
+  %xor6.i = icmp eq i32 %0, 3
+  %narrow.not = or i1 %cmp.not.i, %xor6.i
+  %. = select i1 %narrow.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc != 0) ^ (cc != 1)) && (cc != 3)
+define range(i64 5, 9) i64 @fu_013_XOR_AND() {
+; CHECK-LABEL: fu_013_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB39_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor7.i = icmp samesign ugt i32 %0, 1
+  %. = select i1 %xor7.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc != 0) && (cc != 1)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fu_013_XOR_AND_XOR_a() {
+; CHECK-LABEL: fu_013_XOR_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB40_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp samesign ugt i32 %0, 1
+  %3 = icmp ne i32 %0, 3
+  %tobool.not = and i1 %2, %3
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; (cc != 0) && ((cc != 1) ^ (cc != 3))
+define range(i64 5, 9) i64 @fu_013_AND_XOR_b() {
+; CHECK-LABEL: fu_013_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB41_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not.not.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc != 0) && (cc != 2)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fu_023_XOR_AND() {
+; CHECK-LABEL: fu_023_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB42_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = trunc i32 %0 to i1
+  %3 = icmp eq i32 %0, 3
+  %tobool.not = xor i1 %3, %2
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc != 0) && (cc != 2)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fu_023_AND_XOR_a() {
+; CHECK-LABEL: fu_023_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB43_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = trunc i32 %0 to i1
+  %3 = icmp eq i32 %0, 3
+  %tobool.not = xor i1 %3, %2
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc != 0) && ((cc != 2) ^ (cc != 3))
+define range(i64 5, 9) i64 @fu_023_AND_XOR_b() {
+; CHECK-LABEL: fu_023_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB44_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %narrow = icmp samesign ult i32 %0, 2
+  %. = select i1 %narrow, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc != 1) ^ (cc != 2)) && (cc != 3)
+define range(i64 5, 9) i64 @fu_123_XOR_AND() {
+; CHECK-LABEL: fu_123_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB45_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -3
+  %xor7.i = icmp ult i32 %2, -2
+  %. = select i1 %xor7.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; ((cc != 1) && (cc != 2)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fu_123_AND_XOR_a() {
+; CHECK-LABEL: fu_123_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB46_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %tobool.not = icmp eq i32 %0, 0
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; (cc != 1) && ((cc != 2) ^ (cc != 3))
+define range(i64 5, 9) i64 @fu_123_AND_XOR_b() {
+; CHECK-LABEL: fu_123_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB47_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %narrow = icmp samesign ult i32 %0, 2
+  %. = select i1 %narrow, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (((cc != 0) || (cc != 1)) && (cc != 2))
+define i64 @bar1_012_OR_AND() {
+; CHECK-LABEL: bar1_012_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy at PLT
+; CHECK-NEXT:  .LBB48_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 2
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+declare void @dummy() local_unnamed_addr #1
+
+; Test (((cc == 0) && (cc != 1)) || (cc != 2))
+define i64 @bar1_012_AND_OR_a() {
+; CHECK-LABEL: bar1_012_AND_OR_a:
+; CHECK:       # %bb.0: # %if.end
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+if.end:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc != 1) || (cc != 2))
+define i64 @bar1_012_AND_OR_b() {
+; CHECK-LABEL: bar1_012_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy at PLT
+; CHECK-NEXT:  .LBB50_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.not.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+
+; Test (((cc != 0) || (cc != 1)) && (cc != 3))
+define i64 @bar1_013_OR_AND() {
+; CHECK-LABEL: bar1_013_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB51_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (((cc == 0) && (cc != 1)) || (cc != 3))
+define i64 @bar1_013_XOR_AND_OR_a() {
+; CHECK-LABEL: bar1_013_XOR_AND_OR_a:
+; CHECK:       # %bb.0: # %if.end
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+if.end:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc != 1)) || (cc != 3).
+define i64 @bar1_013_AND_OR_b() {
+; CHECK-LABEL: bar1_013_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy at PLT
+; CHECK-NEXT:  .LBB53_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.not.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) || (cc != 2)) && (cc != 3)
+define i64 @bar1_023_OR_AND() {
+; CHECK-LABEL: bar1_023_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB54_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc != 2)) || (cc != 3)
+define i64 @bar1_023_AND_OR_a() {
+; CHECK-LABEL: bar1_023_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc != 2) || (cc != 3))
+define i64 @bar1_023_AND_OR_b() {
+; CHECK-LABEL: bar1_023_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy at PLT
+; CHECK-NEXT:  .LBB56_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.not.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) || (cc != 2)) && (cc != 3)
+define i64 @bar1_123_OR_AND() {
+; CHECK-LABEL: bar1_123_OR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB57_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) && (cc != 2)) || (cc != 3).
+define i64 @bar1_123_AND_OR_a() {
+; CHECK-LABEL: bar1_123_AND_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret i64 undef
+}
+
+; Test (cc == 1) && ((cc != 2) || (cc != 3)).
+define i64 @bar1_123_AND_OR_b() {
+; CHECK-LABEL: bar1_123_AND_OR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnl dummy at PLT
+; CHECK-NEXT:  .LBB59_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i.not = icmp eq i32 %0, 1
+  br i1 %cmp.not.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc != 1)) && (cc != 2)
+define i64 @bar1_012_XOR_AND() {
+; CHECK-LABEL: bar1_012_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgle dummy at PLT
+; CHECK-NEXT:  .LBB60_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor7.i = icmp samesign ugt i32 %0, 1
+  br i1 %xor7.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc != 1)) ^ (cc != 2)
+define i64 @bar1_012_AND_XOR_a() {
+; CHECK-LABEL: bar1_012_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB61_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5.i.not = icmp eq i32 %0, 3
+  br i1 %xor5.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc != 1)) && (cc != 3).
+define i64 @bar1_012_AND_XOR_b() {
+; CHECK-LABEL: bar1_012_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jglh dummy at PLT
+; CHECK-NEXT:  .LBB62_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  switch i32 %0, label %if.then [
+    i32 3, label %if.end
+    i32 0, label %if.end
+  ]
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %entry, %if.then
+  ret i64 undef
+}
+
+; Test ((cc == 0) ^ (cc != 1)) && (cc != 3).
+define i64 @bar1_013_XOR_AND() {
+; CHECK-LABEL: bar1_013_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgle dummy at PLT
+; CHECK-NEXT:  .LBB63_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor7.i = icmp samesign ugt i32 %0, 1
+  br i1 %xor7.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 0) && ((cc != 1) ^ (cc != 3))
+define i64 @bar1_013_XOR_AND_XOR_a() {
+; CHECK-LABEL: bar1_013_XOR_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy at PLT
+; CHECK-NEXT:  .LBB64_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %switch = icmp eq i32 %0, 2
+  br i1 %switch, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %if.then
+  ret i64 undef
+}
+
+; Test  (cc == 0) && ((cc != 1) ^ (cc != 3)).
+define i64 @bar1_013_AND_XOR_b() {
+; CHECK-LABEL: bar1_013_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnhe dummy at PLT
+; CHECK-NEXT:  .LBB65_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not.not = icmp eq i32 %2, 0
+  br i1 %tobool.not.not.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc != 2)) ^ (cc != 3)
+define i64 @bar1_023_XOR_AND() {
+; CHECK-LABEL: bar1_023_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnl dummy at PLT
+; CHECK-NEXT:  .LBB66_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = trunc i32 %0 to i1
+  %3 = icmp eq i32 %0, 3
+  %tobool.not = xor i1 %3, %2
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 0) && (cc != 2)) ^ (cc != 3)
+define i64 @bar1_023_AND_XOR_a() {
+; CHECK-LABEL: bar1_023_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnl dummy at PLT
+; CHECK-NEXT:  .LBB67_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = trunc i32 %0 to i1
+  %3 = icmp eq i32 %0, 3
+  %tobool.not = xor i1 %3, %2
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test  (cc == 0) && ((cc != 2) ^ (cc != 3)).
+define i64 @bar1_023_AND_XOR_b() {
+; CHECK-LABEL: bar1_023_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnle dummy at PLT
+; CHECK-NEXT:  .LBB68_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %narrow = icmp samesign ult i32 %0, 2
+  br i1 %narrow, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) ^ (cc != 2)) && (cc != 3).
+define i64 @bar1_123_XOR_AND() {
+; CHECK-LABEL: bar1_123_XOR_AND:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jglh dummy at PLT
+; CHECK-NEXT:  .LBB69_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -3
+  %xor7.i = icmp ult i32 %2, -2
+  br i1 %xor7.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc == 1) && (cc != 2)) ^ (cc != 3).
+define i64 @bar1_123_AND_XOR_a() {
+; CHECK-LABEL: bar1_123_AND_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy at PLT
+; CHECK-NEXT:  .LBB70_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %tobool.not = icmp eq i32 %0, 0
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc == 1) && ((cc != 2) ^ (cc != 3))
+define i64 @bar1_123_AND_XOR_b() {
+; CHECK-LABEL: bar1_123_AND_XOR_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnle dummy at PLT
+; CHECK-NEXT:  .LBB71_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %narrow = icmp samesign ult i32 %0, 2
+  br i1 %narrow, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc != 0) || ((cc != 1) ^ (cc != 2))
+define range(i64 5, 9) i64 @fu_012_OR_XOR_a() {
+; CHECK-LABEL: fu_012_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB72_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.not.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc != 0) || (cc != 1)) ^ (cc != 2)
+define range(i64 5, 9) i64 @fu_012_OR_XOR_c() {
+; CHECK-LABEL: fu_012_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB73_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 2
+  %. = select i1 %cmp2.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test (cc != 0) ^ ((cc != 1) || (cc != 2));
+define range(i64 5, 9) i64 @fu_012_XOR_OR_a() {
+; CHECK-LABEL: fu_012_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB74_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc != 0) ^ (cc != 1)) || (cc != 2)
+define range(i64 5, 9) i64 @fu_012_XOR_OR_c() {
+; CHECK-LABEL: fu_012_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB75_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.i.not = icmp eq i32 %0, 2
+  %. = select i1 %cmp3.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc != 0) || ((cc != 1) ^ (cc != 3))
+define range(i64 5, 9) i64 @fu_013_OR_XOR_a() {
+; CHECK-LABEL: fu_013_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB76_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.not.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc != 0) || (cc != 1)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fu_013_OR_XOR_c() {
+; CHECK-LABEL: fu_013_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB77_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test (cc != 0) ^ ((cc != 1) || (cc != 3))
+define range(i64 5, 9) i64 @fu_013_XOR_OR_a() {
+; CHECK-LABEL: fu_013_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB78_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc != 0) ^ (cc != 1)) || (cc != 3)
+define range(i64 5, 9) i64 @fu_013_XOR_OR_c() {
+; CHECK-LABEL: fu_013_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB79_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp3.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test  (cc != 0) || ((cc != 2) ^ (cc != 3))
+define range(i64 5, 9) i64 @fu_023_OR_XOR_a() {
+; CHECK-LABEL: fu_023_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB80_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.not.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc != 0) || (cc != 2)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fu_023_OR_XOR_c() {
+; CHECK-LABEL: fu_023_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB81_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test (cc != 0) ^ ((cc != 2) || (cc != 3))
+define range(i64 5, 9) i64 @fu_023_XOR_OR_a() {
+; CHECK-LABEL: fu_023_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB82_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc != 0) ^ (cc != 2)) || (cc != 3)
+define range(i64 5, 9) i64 @fu_023_XOR_OR_c() {
+; CHECK-LABEL: fu_023_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB83_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp3.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc != 1) || ((cc != 2) ^ (cc != 3))
+define range(i64 5, 9) i64 @fu_123_OR_XOR_a() {
+; CHECK-LABEL: fu_123_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB84_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i.not = icmp eq i32 %0, 1
+  %. = select i1 %cmp.not.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test ((cc != 1) || (cc != 2)) ^ (cc != 3)
+define range(i64 5, 9) i64 @fu_123_OR_XOR_c() {
+; CHECK-LABEL: fu_123_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB85_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test (cc != 1) ^ ((cc != 2) || (cc != 3))
+define range(i64 5, 9) i64 @fu_123_XOR_OR_a() {
+; CHECK-LABEL: fu_123_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB86_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test ((cc != 1) ^ (cc != 2)) || (cc != 3)
+define range(i64 5, 9) i64 @fu_123_XOR_OR_c() {
+; CHECK-LABEL: fu_123_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB87_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp3.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test (cc != 0) || ((cc != 1) ^ (cc != 2))
+define i64 @bar_012_OR_XOR_a() {
+; CHECK-LABEL: bar_012_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy at PLT
+; CHECK-NEXT:  .LBB88_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.not.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc != 0) || (cc != 1)) ^ (cc != 2)
+define i64 @bar_012_OR_XOR_c() {
+; CHECK-LABEL: bar_012_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgh dummy at PLT
+; CHECK-NEXT:  .LBB89_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 2
+  br i1 %cmp2.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc != 0) ^ ((cc != 1) || (cc != 2));
+define i64 @bar_012_XOR_OR_a() {
+; CHECK-LABEL: bar_012_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB90_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc != 0) ^ (cc != 1)) || (cc != 2)
+define i64 @bar_012_XOR_OR_c() {
+; CHECK-LABEL: bar_012_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy at PLT
+; CHECK-NEXT:  .LBB91_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.i.not = icmp eq i32 %0, 2
+  br i1 %cmp3.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc != 0) || ((cc != 1) ^ (cc != 3))
+define i64 @bar_013_OR_XOR_a() {
+; CHECK-LABEL: bar_013_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy at PLT
+; CHECK-NEXT:  .LBB92_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.not.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc != 0) || (cc != 1)) ^ (cc != 3)
+define i64 @bar_013_OR_XOR_c() {
+; CHECK-LABEL: bar_013_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgo dummy at PLT
+; CHECK-NEXT:  .LBB93_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc != 0) ^ ((cc != 1) || (cc != 3))
+define i64 @bar_013_XOR_OR_a() {
+; CHECK-LABEL: bar_013_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB94_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc != 0) ^ (cc != 1)) || (cc != 3)
+define i64 @bar_013_XOR_OR_c() {
+; CHECK-LABEL: bar_013_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB95_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.i.not = icmp eq i32 %0, 3
+  br i1 %cmp3.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc != 0) || ((cc != 2) ^ (cc != 3))
+define i64 @bar_023_OR_XOR_a() {
+; CHECK-LABEL: bar_023_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy at PLT
+; CHECK-NEXT:  .LBB96_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.not.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc != 0) || (cc != 2)) ^ (cc != 3)
+define i64 @bar_023_OR_XOR_c() {
+; CHECK-LABEL: bar_023_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgo dummy at PLT
+; CHECK-NEXT:  .LBB97_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc != 0) ^ ((cc != 2) || (cc != 3))
+define i64 @bar_023_XOR_OR_a() {
+; CHECK-LABEL: bar_023_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB98_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc != 0) ^ (cc != 2)) || (cc != 3)
+define i64 @bar_023_XOR_OR_c() {
+; CHECK-LABEL: bar_023_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB99_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.i.not = icmp eq i32 %0, 3
+  br i1 %cmp3.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc != 1) || ((cc != 2) ^ (cc != 3))
+define i64 @bar_123_OR_XOR_a() {
+; CHECK-LABEL: bar_123_OR_XOR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnl dummy at PLT
+; CHECK-NEXT:  .LBB100_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not.i.not = icmp eq i32 %0, 1
+  br i1 %cmp.not.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc != 1) || (cc != 2)) ^ (cc != 3)
+define i64 @bar_123_OR_XOR_c() {
+; CHECK-LABEL: bar_123_OR_XOR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgo dummy at PLT
+; CHECK-NEXT:  .LBB101_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test (cc != 1) ^ ((cc != 2) || (cc != 3))
+define i64 @bar_123_XOR_OR_a() {
+; CHECK-LABEL: bar_123_XOR_OR_a:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgl dummy at PLT
+; CHECK-NEXT:  .LBB102_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
+; Test ((cc != 1) ^ (cc != 2)) || (cc != 3)
+define i64 @bar_123_XOR_OR_c() {
+; CHECK-LABEL: bar_123_XOR_OR_c:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB103_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp3.i.not = icmp eq i32 %0, 3
+  br i1 %cmp3.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret i64 undef
+}
+
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccor.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccor.ll
new file mode 100644
index 0000000000000..9b51380ac4c09
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccor.ll
@@ -0,0 +1,1047 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; Test Flag Output Operands with 14 combinations of CCMask and optimizations
+; for OR across three different functions, including two test cases from Heiko.
+
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
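+
+; Roughly the C pattern the foo_* tests below are reduced from (variable names
+; are illustrative; the "=@cc" flag output constraint is the feature under test):
+;
+;   int cc;
+;   asm volatile("ahi %0,42\n" : "=d"(x), "=@cc"(cc) : "0"(x));
+;   return (cc == 0 || cc == 1) ? 42 : 0;   /* one of the 14 CC-mask combinations */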
+
+; Test CC == 0.
+define signext range(i32 0, 43) i32 @foo_0(i32 noundef signext %x) {
+; CHECK-LABEL: foo_0:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB0_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %cond = select i1 %cmp, i32 42, i32 0
+  ret i32 %cond
+}
+
+;declare void @llvm.assume(i1 noundef)
+
+; Test CC == 1.
+define signext range(i32 0, 43) i32 @foo_1(i32 noundef signext %x) {
+; CHECK-LABEL: foo_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB1_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 1
+  %cond = select i1 %cmp, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 2.
+define signext range(i32 0, 43) i32 @foo_2(i32 noundef signext %x) {
+; CHECK-LABEL: foo_2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB2_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 2
+  %cond = select i1 %cmp, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 3.
+define signext range(i32 0, 43) i32 @foo_3(i32 noundef signext %x) {
+; CHECK-LABEL: foo_3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB3_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 0 || CC == 1.
+define signext range(i32 0, 43) i32 @foo_01(i32 noundef signext %x) {
+; CHECK-LABEL: foo_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB4_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp samesign ult i32 %asmresult1, 2
+  %cond = select i1 %2, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 0 || CC == 2.
+define signext range(i32 0, 43) i32 @foo_02(i32 noundef signext %x) {
+; CHECK-LABEL: foo_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB5_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %asmresult1, 1
+  %3 = icmp eq i32 %2, 0
+  %cond = select i1 %3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 0 || CC == 3.
+define signext range(i32 0, 43) i32 @foo_03(i32 noundef signext %x) {
+; CHECK-LABEL: foo_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB6_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %cmp2 = icmp eq i32 %asmresult1, 3
+  %2 = or i1 %cmp, %cmp2
+  %cond = select i1 %2, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 1 || CC == 2.
+define signext range(i32 0, 43) i32 @foo_12(i32 noundef signext %x) {
+; CHECK-LABEL: foo_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB7_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -1
+  %3 = icmp ult i32 %2, 2
+  %cond = select i1 %3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 1 || CC == 3.
+define signext range(i32 0, 43) i32 @foo_13(i32 noundef signext %x) {
+; CHECK-LABEL: foo_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB8_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %asmresult1, 1
+  %.not = icmp eq i32 %2, 0
+  %cond = select i1 %.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC == 2 || CC == 3.
+define signext range(i32 0, 43) i32 @foo_23(i32 noundef signext %x) {
+; CHECK-LABEL: foo_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB9_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %.not = icmp samesign ult i32 %asmresult1, 2
+  %cond = select i1 %.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC == 0 || CC == 1 || CC == 2.
+define signext range(i32 0, 43) i32 @foo_012(i32 noundef signext %x) {
+; CHECK-LABEL: foo_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB10_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC == 0 || CC == 1 || CC == 3.
+define signext range(i32 0, 43) i32 @foo_013(i32 noundef signext %x) {
+; CHECK-LABEL: foo_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnhr %r14
+; CHECK-NEXT:  .LBB11_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %or.cond = icmp samesign ult i32 %asmresult1, 2
+  %cmp3 = icmp eq i32 %asmresult1, 3
+  %2 = or i1 %or.cond, %cmp3
+  %cond = select i1 %2, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 0 || CC == 2 || CC == 3.
+define signext range(i32 0, 43) i32 @foo_023(i32 noundef signext %x) {
+; CHECK-LABEL: foo_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnlr %r14
+; CHECK-NEXT:  .LBB12_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %asmresult1, 1
+  %or.cond = icmp eq i32 %2, 0
+  %cmp3 = icmp eq i32 %asmresult1, 3
+  %3 = or i1 %cmp3, %or.cond
+  %cond = select i1 %3, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 1 || CC == 2 || CC == 3.
+define signext range(i32 0, 43) i32 @foo_123(i32 noundef signext %x) {
+; CHECK-LABEL: foo_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bner %r14
+; CHECK-NEXT:  .LBB13_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -1
+  %3 = icmp ult i32 %2, 3
+  %cond = select i1 %3, i32 42, i32 0
+  ret i32 %cond
+}
+
+
+; Test Flag Output Operands with 14 combinations of CCMask and optimizations.
+; These test cases are derived from Heiko's kernel code, plus 14 variations of CCMask for OR.
+
+ at a = dso_local global i32 0, align 4
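+
+; Roughly the C pattern behind the fu_* and bar_* tests below (names are
+; illustrative; only the "=@cc" flag output constraint is the feature
+; exercised by this patch):
+;
+;   int cc;
+;   asm volatile("alsi %1,-1\n" : "=@cc"(cc), "=QS"(a) : "QS"(a) : "memory");
+;   if (cc == 0 || cc == 1)   /* one of the 14 CC-mask combinations */
+;     ...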
+
+; Test CC == 0.
+define range(i64 5, 9) i64 @fu_0() {
+; CHECK-LABEL: fu_0:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB14_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 1.
+define range(i64 5, 9) i64 @fu_1() {
+; CHECK-LABEL: fu_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB15_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 2.
+define range(i64 5, 9) i64 @fu_2() {
+; CHECK-LABEL: fu_2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB16_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 2
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 3.
+define range(i64 5, 9) i64 @fu_3() {
+; CHECK-LABEL: fu_3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB17_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 0 || CC == 1.
+define range(i64 5, 9) i64 @fu_01() {
+; CHECK-LABEL: fu_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB18_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp samesign ugt i32 %0, 1
+  %. = select i1 %2, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 || CC == 2.
+define range(i64 5, 9) i64 @fu_02() {
+; CHECK-LABEL: fu_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB19_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 0 || CC == 3.
+define range(i64 5, 9) i64 @fu_03() {
+; CHECK-LABEL: fu_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB20_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp ne i32 %0, 0
+  %cmp1.i = icmp ne i32 %0, 3
+  %.not = and i1 %cmp.i, %cmp1.i
+  %. = select i1 %.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 1 || CC == 2.
+define range(i64 5, 9) i64 @fu_12() {
+; CHECK-LABEL: fu_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB21_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -3
+  %3 = icmp ult i32 %2, -2
+  %. = select i1 %3, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 1 || CC == 3.
+define range(i64 5, 9) i64 @fu_13() {
+; CHECK-LABEL: fu_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB22_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 2 || CC == 3.
+define range(i64 5, 9) i64 @fu_23() {
+; CHECK-LABEL: fu_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB23_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp samesign ult i32 %0, 2
+  %. = select i1 %2, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 || CC == 1 || CC == 2.
+define range(i64 5, 9) i64 @fu_012() {
+; CHECK-LABEL: fu_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB24_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %narrow.not = icmp eq i32 %0, 3
+  %. = select i1 %narrow.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 || CC == 1 || CC == 3.
+define range(i64 5, 9) i64 @fu_013() {
+; CHECK-LABEL: fu_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB25_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i = icmp ne i32 %0, 3
+  %or.cond.i.inv = icmp samesign ugt i32 %0, 1
+  %narrow.not = and i1 %or.cond.i.inv, %cmp2.i
+  %. = select i1 %narrow.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 || CC == 2 || CC == 3.
+define range(i64 5, 9) i64 @fu_023() {
+; CHECK-LABEL: fu_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB26_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %or.cond.i = icmp ne i32 %2, 0
+  %cmp2.i = icmp ne i32 %0, 3
+  %narrow.not = and i1 %cmp2.i, %or.cond.i
+  %. = select i1 %narrow.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test Flag Output Operands with 14 combinations of CCMask and optimizations.
+; These test cases are derived from Heiko's kernel code, plus 14 variations of CCMask for OR.
+
+; Test CC == 0.
+define void @bar_0() {
+; CHECK-LABEL: bar_0:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB27_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+declare void @dummy()
+
+; Test CC == 1.
+define void @bar_1() {
+; CHECK-LABEL: bar_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgl dummy at PLT
+; CHECK-NEXT:  .LBB28_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 2.
+define void @bar_2() {
+; CHECK-LABEL: bar_2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgh dummy at PLT
+; CHECK-NEXT:  .LBB29_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 2
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 3.
+define void @bar_3() {
+; CHECK-LABEL: bar_3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgo dummy at PLT
+; CHECK-NEXT:  .LBB30_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 3
+  br i1 %cmp.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 || CC == 1.
+define void @bar_01() {
+; CHECK-LABEL: bar_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgle dummy at PLT
+; CHECK-NEXT:  .LBB31_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp samesign ugt i32 %0, 1
+  br i1 %2, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 || CC == 2.
+define void @bar_02() {
+; CHECK-LABEL: bar_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jghe dummy at PLT
+; CHECK-NEXT:  .LBB32_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  br i1 %tobool.not.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 || CC == 3.
+define void @bar_03() {
+; CHECK-LABEL: bar_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnlh dummy at PLT
+; CHECK-NEXT:  .LBB33_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  switch i32 %0, label %if.end [
+    i32 3, label %if.then
+    i32 0, label %if.then
+  ]
+
+if.then:                                          ; preds = %entry, %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %if.then
+  ret void
+}
+
+; Test CC == 1 || CC == 2.
+define void @bar_12() {
+; CHECK-LABEL: bar_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jglh dummy at PLT
+; CHECK-NEXT:  .LBB34_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -3
+  %3 = icmp ult i32 %2, -2
+  br i1 %3, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 1 || CC == 3.
+define void @bar_13() {
+; CHECK-LABEL: bar_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnhe dummy at PLT
+; CHECK-NEXT:  .LBB35_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not = icmp eq i32 %2, 0
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 2 || CC == 3.
+define void @bar_23() {
+; CHECK-LABEL: bar_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnle dummy at PLT
+; CHECK-NEXT:  .LBB36_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = icmp samesign ult i32 %0, 2
+  br i1 %2, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 || CC == 1 || CC == 2.
+define void @bar_012() {
+; CHECK-LABEL: bar_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB37_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %narrow.not = icmp eq i32 %0, 3
+  br i1 %narrow.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 || CC == 1 || CC == 3.
+define void @bar_013() {
+; CHECK-LABEL: bar_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy at PLT
+; CHECK-NEXT:  .LBB38_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %switch = icmp eq i32 %0, 2
+  br i1 %switch, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %if.then
+  ret void
+}
+
+; Test CC == 0 || CC == 2 || CC == 3.
+define void @bar_023() {
+; CHECK-LABEL: bar_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnl dummy at PLT
+; CHECK-NEXT:  .LBB39_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %or.cond.i = icmp ne i32 %2, 0
+  %cmp2.i = icmp ne i32 %0, 3
+  %narrow.not = and i1 %cmp2.i, %or.cond.i
+  br i1 %narrow.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 1 || CC == 2 || CC == 3.
+define void @bar_123() {
+; CHECK-LABEL: bar_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy at PLT
+; CHECK-NEXT:  .LBB40_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %narrow = icmp eq i32 %0, 0
+  br i1 %narrow, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccor_eq_noteq.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccor_eq_noteq.ll
new file mode 100644
index 0000000000000..728f9a5bbfc3c
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccor_eq_noteq.ll
@@ -0,0 +1,854 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; Test Flag Output Operands with 14 combinations of CCMask and optimizations
+; for OR across three different functions, including two test cases from Heiko.
+; This test checks combinations of the equal (==) and not-equal (!=) operators,
+; e.g. CC == 0 || CC == 1 || CC != 2.
+
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
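+
+; Roughly the C condition shape exercised here, mixing == and != on the
+; "=@cc" result (illustrative only):
+;
+;   int cc;
+;   asm("ahi %0,42\n" : "=d"(x), "=@cc"(cc) : "0"(x));
+;   return (cc == 0 || cc != 2) ? 42 : 0;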
+
+; Test CC == 0 || CC != 1.
+define signext range(i32 0, 43) i32 @foo_01(i32 noundef signext %x) {
+; CHECK-LABEL: foo_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB0_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.not = icmp eq i32 %asmresult1, 1
+  %cond = select i1 %cmp2.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+declare void @llvm.assume(i1 noundef) #1
+
+; Test CC == 0 || CC != 2
+define signext range(i32 0, 43) i32 @foo_02(i32 noundef signext %x) {
+; CHECK-LABEL: foo_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB1_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.not = icmp eq i32 %asmresult1, 2
+  %cond = select i1 %cmp2.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC == 0 || CC != 3.
+define signext range(i32 0, 43) i32 @foo_03(i32 noundef signext %x) {
+; CHECK-LABEL: foo_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB2_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp2.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC == 1 || CC != 2.
+define signext range(i32 0, 43) i32 @foo_12(i32 noundef signext %x) {
+; CHECK-LABEL: foo_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB3_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.not = icmp eq i32 %asmresult1, 2
+  %cond = select i1 %cmp2.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC == 1 || CC != 3.
+define signext range(i32 0, 43) i32 @foo_13(i32 noundef signext %x) {
+; CHECK-LABEL: foo_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB4_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp2.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC == 2 || CC != 3.
+define signext range(i32 0, 43) i32 @foo_23(i32 noundef signext %x) {
+; CHECK-LABEL: foo_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB5_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp2.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC == 0 || CC != 1 || CC != 2.
+define noundef signext i32 @foo_012(i32 noundef signext %x) {
+; CHECK-LABEL: foo_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 42
+}
+
+; Test CC == 0 || CC != 1 || CC != 3.
+define noundef signext i32 @foo_013(i32 noundef signext %x) {
+; CHECK-LABEL: foo_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 42
+}
+
+; Test CC == 0 || CC != 2 || CC != 3.
+define noundef signext i32 @foo_023(i32 noundef signext %x) {
+; CHECK-LABEL: foo_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 42
+}
+
+; Test CC == 1 || CC != 2 || CC != 3.
+define noundef signext i32 @foo_123(i32 noundef signext %x) {
+; CHECK-LABEL: foo_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 42
+}
+
+
+ at a = global i32 0, align 4
+
+; Test CC == 0 || CC != 1.
+define range(i64 5, 9) i64 @fu_01() {
+; CHECK-LABEL: fu_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB10_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp1.i.not = icmp eq i32 %0, 1
+  %. = select i1 %cmp1.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 || CC != 2.
+define range(i64 5, 9) i64 @fu_02() {
+; CHECK-LABEL: fu_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB11_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp1.i.not = icmp eq i32 %0, 2
+  %. = select i1 %cmp1.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 || CC != 3.
+define range(i64 5, 9) i64 @fu_03() {
+; CHECK-LABEL: fu_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB12_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp1.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp1.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 1 || CC != 2.
+define range(i64 5, 9) i64 @fu_12() {
+; CHECK-LABEL: fu_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB13_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp1.i.not = icmp eq i32 %0, 2
+  %. = select i1 %cmp1.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 1 || CC != 3.
+define range(i64 5, 9) i64 @fu_13() {
+; CHECK-LABEL: fu_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB14_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp1.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp1.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 2 || CC != 3.
+define range(i64 5, 9) i64 @fu_23() {
+; CHECK-LABEL: fu_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB15_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp1.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp1.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 || CC != 1 || CC != 2.
+define noundef i64 @fu_012() {
+; CHECK-LABEL: fu_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test CC == 0 || CC != 1 || CC != 3.
+define noundef i64 @fu_013() {
+; CHECK-LABEL: fu_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test CC == 0 || CC != 2 || CC != 3.
+define noundef i64 @fu_023() {
+; CHECK-LABEL: fu_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test CC == 1 || CC != 2 || CC != 3.
+define noundef i64 @fu_123() {
+; CHECK-LABEL: fu_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test CC == 0 || CC == 1 || CC != 2.
+define range(i64 5, 9) i64 @fu1_012(){
+; CHECK-LABEL: fu1_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB20_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 2
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 || CC == 1 || CC != 3.
+define range(i64 5, 9) i64 @fu1_013(){
+; CHECK-LABEL: fu1_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB21_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 || CC == 2 || CC != 3.
+define range(i64 5, 9) i64 @fu1_023(){
+; CHECK-LABEL: fu1_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB22_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 1 || CC == 2 || CC != 3.
+define range(i64 5, 9) i64 @fu1_123(){
+; CHECK-LABEL: fu1_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB23_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp2.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+
+; Test CC == 0 || CC != 1.
+define void @bar_01() {
+; CHECK-LABEL: bar_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnl dummy at PLT
+; CHECK-NEXT:  .LBB24_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp1.i.not = icmp eq i32 %0, 1
+  br i1 %cmp1.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+declare void @dummy() local_unnamed_addr #1
+
+; Test CC == 0 || CC != 2.
+define void @bar_02() {
+; CHECK-LABEL: bar_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy at PLT
+; CHECK-NEXT:  .LBB25_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp1.i.not = icmp eq i32 %0, 2
+  br i1 %cmp1.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 || CC != 3.
+define void @bar_03() {
+; CHECK-LABEL: bar_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB26_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp1.i.not = icmp eq i32 %0, 3
+  br i1 %cmp1.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 1 || CC != 2.
+define void @bar_12() {
+; CHECK-LABEL: bar_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy at PLT
+; CHECK-NEXT:  .LBB27_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp1.i.not = icmp eq i32 %0, 2
+  br i1 %cmp1.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 1 || CC != 3.
+define void @bar_13() {
+; CHECK-LABEL: bar_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB28_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp1.i.not = icmp eq i32 %0, 3
+  br i1 %cmp1.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 2 || CC != 3.
+define void @bar_23() {
+; CHECK-LABEL: bar_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB29_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp1.i.not = icmp eq i32 %0, 3
+  br i1 %cmp1.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 || CC != 1 || CC != 2.
+define void @bar_012() {
+; CHECK-LABEL: bar_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret void
+}
+
+; Test CC == 0 || CC != 1 || CC != 3.
+define void @bar_013() {
+; CHECK-LABEL: bar_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret void
+}
+
+; Test CC == 0 || CC != 2 || CC != 3.
+define void @bar_023() {
+; CHECK-LABEL: bar_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret void
+}
+
+; Test CC == 1 || CC != 2 || CC != 3.
+define void @bar_123() {
+; CHECK-LABEL: bar_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret void
+}
+
+; Test CC == 0 || CC == 1 || CC != 2.
+define void @bar1_012() {
+; CHECK-LABEL: bar1_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy at PLT
+; CHECK-NEXT:  .LBB34_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 2
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 || CC == 1 || CC != 3.
+define void @bar1_013() {
+; CHECK-LABEL: bar1_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB35_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 || CC == 2 || CC != 3.
+define void @bar1_023() {
+; CHECK-LABEL: bar1_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB36_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 1 || CC == 2 || CC != 3.
+define void @bar1_123() {
+; CHECK-LABEL: bar1_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB37_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp2.i.not = icmp eq i32 %0, 3
+  br i1 %cmp2.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccor_not.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccor_not.ll
new file mode 100644
index 0000000000000..4bcd004553393
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccor_not.ll
@@ -0,0 +1,806 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; Test Flag Output Operands with 14 combinations of CCMASK and optimizations
+; for OR across three different functions, including two test cases from Heiko.
+; This test checks the NOT EQUAL (!=) operator, e.g. CC != 0 || CC != 1 || CC != 2.
+
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
+
+; Test CC != 0.
+define signext range(i32 0, 43) i32 @foo_0(i32 noundef signext %x) {
+; CHECK-LABEL: foo_0:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB0_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not = icmp eq i32 %asmresult1, 0
+  %cond = select i1 %cmp.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+declare void @llvm.assume(i1 noundef) #1
+
+; Test CC != 1.
+define signext range(i32 0, 43) i32 @foo_1(i32 noundef signext %x) {
+; CHECK-LABEL: foo_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB1_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not = icmp eq i32 %asmresult1, 1
+  %cond = select i1 %cmp.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC != 2.
+define signext range(i32 0, 43) i32 @foo_2(i32 noundef signext %x) {
+; CHECK-LABEL: foo_2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB2_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not = icmp eq i32 %asmresult1, 2
+  %cond = select i1 %cmp.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC != 3.
+define signext range(i32 0, 43) i32 @foo_3(i32 noundef signext %x) {
+; CHECK-LABEL: foo_3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB3_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %cmp.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC != 0 || CC != 1.
+define noundef signext i32 @foo_01(i32 noundef signext %x) {
+; CHECK-LABEL: foo_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 42
+}
+
+; Test CC != 0 || CC != 2.
+define noundef signext i32 @foo_02(i32 noundef signext %x) {
+; CHECK-LABEL: foo_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 42
+}
+
+; Test CC != 0 || CC != 3.
+define noundef signext i32 @foo_03(i32 noundef signext %x) {
+; CHECK-LABEL: foo_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 42
+}
+
+; Test CC != 1 || CC != 2.
+define noundef signext i32 @foo_12(i32 noundef signext %x) {
+; CHECK-LABEL: foo_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 42
+}
+
+; Test CC != 1 || CC != 3.
+define noundef signext i32 @foo_13(i32 noundef signext %x) {
+; CHECK-LABEL: foo_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 42
+}
+
+; Test CC != 2 || CC != 3.
+define noundef signext i32 @foo_23(i32 noundef signext %x) {
+; CHECK-LABEL: foo_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 42
+}
+
+; Test CC != 0 || CC != 1 || CC != 2.
+define noundef signext i32 @foo_012(i32 noundef signext %x) {
+; CHECK-LABEL: foo_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 42
+}
+
+; Test CC != 0 || CC != 1 || CC != 3.
+define noundef signext i32 @foo_013(i32 noundef signext %x) {
+; CHECK-LABEL: foo_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 42
+}
+
+; Test CC != 0 || CC != 2 || CC != 3.
+define noundef signext i32 @foo_023(i32 noundef signext %x) {
+; CHECK-LABEL: foo_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 42
+}
+
+; Test CC != 1 || CC != 2 || CC != 3.
+define noundef signext i32 @foo_123(i32 noundef signext %x) {
+; CHECK-LABEL: foo_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i32 42
+}
+
+
+ at a = global i32 0, align 4
+
+; Test CC != 0.
+define range(i64 5, 9) i64 @fu_0(){
+; CHECK-LABEL: fu_0:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB14_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  %. = select i1 %cmp.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 1.
+define range(i64 5, 9) i64 @fu_1(){
+; CHECK-LABEL: fu_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB15_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  %. = select i1 %cmp.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 2.
+define range(i64 5, 9) i64 @fu_2(){
+; CHECK-LABEL: fu_2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB16_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 2
+  %. = select i1 %cmp.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 3.
+define range(i64 5, 9) i64 @fu_3(){
+; CHECK-LABEL: fu_3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB17_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 3
+  %. = select i1 %cmp.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 0 || CC != 1.
+define noundef i64 @fu_01(){
+; CHECK-LABEL: fu_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test CC != 0 || CC != 2.
+define noundef i64 @fu_02(){
+; CHECK-LABEL: fu_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test CC != 0 || CC != 3.
+define noundef i64 @fu_03(){
+; CHECK-LABEL: fu_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test CC != 1 || CC != 2.
+define noundef i64 @fu_12(){
+; CHECK-LABEL: fu_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test CC != 1 || CC != 3.
+define noundef i64 @fu_13(){
+; CHECK-LABEL: fu_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test CC != 2 || CC != 3.
+define noundef i64 @fu_23(){
+; CHECK-LABEL: fu_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test CC != 0 || CC != 1 || CC != 2.
+define noundef i64 @fu_012(){
+; CHECK-LABEL: fu_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test CC != 0 || CC != 1 || CC != 3.
+define noundef i64 @fu_013(){
+; CHECK-LABEL: fu_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test CC != 0 || CC != 2 || CC != 3.
+define noundef i64 @fu_023(){
+; CHECK-LABEL: fu_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test CC != 1 || CC != 2 || CC != 3.
+define noundef i64 @fu_123(){
+; CHECK-LABEL: fu_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  ret i64 5
+}
+
+; Test CC != 0.
+define void @bar_0() {
+; CHECK-LABEL: bar_0:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy at PLT
+; CHECK-NEXT:  .LBB28_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 0
+  br i1 %cmp.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+declare void @dummy() local_unnamed_addr #1
+
+; Test CC != 1.
+define void @bar_1() {
+; CHECK-LABEL: bar_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnl dummy at PLT
+; CHECK-NEXT:  .LBB29_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 1
+  br i1 %cmp.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC != 2.
+define void @bar_2() {
+; CHECK-LABEL: bar_2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy at PLT
+; CHECK-NEXT:  .LBB30_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 2
+  br i1 %cmp.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC != 3.
+define void @bar_3() {
+; CHECK-LABEL: bar_3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy at PLT
+; CHECK-NEXT:  .LBB31_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i.not = icmp eq i32 %0, 3
+  br i1 %cmp.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC != 0 || CC != 1.
+define void @bar_01() {
+; CHECK-LABEL: bar_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret void
+}
+
+; Test CC != 0 || CC != 2.
+define void @bar_02() {
+; CHECK-LABEL: bar_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret void
+}
+
+; Test CC != 0 || CC != 3.
+define void @bar_03() {
+; CHECK-LABEL: bar_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret void
+}
+
+; Test CC != 1 || CC != 2.
+define void @bar_12() {
+; CHECK-LABEL: bar_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret void
+}
+
+; Test CC != 1 || CC != 3.
+define void @bar_13() {
+; CHECK-LABEL: bar_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret void
+}
+
+; Test CC != 2 || CC != 3.
+define void @bar_23() {
+; CHECK-LABEL: bar_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret void
+}
+
+; Test CC != 0 || CC != 1 || CC != 2.
+define void @bar_012() {
+; CHECK-LABEL: bar_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret void
+}
+
+; Test CC != 0 || CC != 1 || CC != 3.
+define void @bar_013() {
+; CHECK-LABEL: bar_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret void
+}
+
+; Test CC != 0 || CC != 2 || CC != 3.
+define void @bar_023() {
+; CHECK-LABEL: bar_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret void
+}
+
+; Test CC != 1 || CC != 2 || CC != 3.
+define void @bar_123() {
+; CHECK-LABEL: bar_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jg dummy at PLT
+entry:
+  %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  tail call void @dummy() #3
+  ret void
+}
+
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor.ll
new file mode 100644
index 0000000000000..0a2a508000430
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor.ll
@@ -0,0 +1,784 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; Test Flag Output Operands with 14 combinations of CCMASK and optimizations
+; for XOR across three different functions, including two test cases from Heiko.
+
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
+
+declare void @llvm.assume(i1 noundef)
+
+; Test CC == 0 ^ CC == 1.
+define signext range(i32 0, 43) i32 @foo_01(i32 noundef signext %x) {
+; CHECK-LABEL: foo_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB0_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5 = icmp samesign ult i32 %asmresult1, 2
+  %cond = select i1 %xor5, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 0 ^ CC == 2.
+define signext range(i32 0, 43) i32 @foo_02(i32 noundef signext %x) {
+; CHECK-LABEL: foo_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB1_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %cmp2 = icmp eq i32 %asmresult1, 2
+  %xor5 = xor i1 %cmp, %cmp2
+  %cond = select i1 %xor5, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 0 ^ CC == 3.
+define signext range(i32 0, 43) i32 @foo_03(i32 noundef signext %x) {
+; CHECK-LABEL: foo_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB2_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %cmp2 = icmp eq i32 %asmresult1, 3
+  %xor5 = xor i1 %cmp, %cmp2
+  %cond = select i1 %xor5, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 1 ^ CC == 2.
+define signext range(i32 0, 43) i32 @foo_12(i32 noundef signext %x) {
+; CHECK-LABEL: foo_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB3_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -1
+  %xor5 = icmp ult i32 %2, 2
+  %cond = select i1 %xor5, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 1 ^ CC == 3.
+define signext range(i32 0, 43) i32 @foo_13(i32 noundef signext %x) {
+; CHECK-LABEL: foo_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnher %r14
+; CHECK-NEXT:  .LBB4_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 1
+  %cmp2 = icmp eq i32 %asmresult1, 3
+  %xor5 = xor i1 %cmp, %cmp2
+  %cond = select i1 %xor5, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 2 ^ CC == 3.
+define signext range(i32 0, 43) i32 @foo_23(i32 noundef signext %x) {
+; CHECK-LABEL: foo_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB5_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5 = icmp samesign ugt i32 %asmresult1, 1
+  %cond = select i1 %xor5, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 0 ^ CC == 1 ^ CC == 2.
+define signext range(i32 0, 43) i32 @foo_012(i32 noundef signext %x) {
+; CHECK-LABEL: foo_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB6_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor610.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %xor610.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC == 0 ^ CC == 1 ^ CC == 3.
+define signext range(i32 0, 43) i32 @foo_013(i32 noundef signext %x) {
+; CHECK-LABEL: foo_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnhr %r14
+; CHECK-NEXT:  .LBB7_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor9 = icmp samesign ult i32 %asmresult1, 2
+  %cmp4 = icmp eq i32 %asmresult1, 3
+  %xor610 = xor i1 %xor9, %cmp4
+  %cond = select i1 %xor610, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 0 ^ CC == 2 ^ CC == 3.
+define signext range(i32 0, 43) i32 @foo_023(i32 noundef signext %x) {
+; CHECK-LABEL: foo_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnlr %r14
+; CHECK-NEXT:  .LBB8_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %cmp2 = icmp eq i32 %asmresult1, 2
+  %xor9 = xor i1 %cmp, %cmp2
+  %cmp4 = icmp eq i32 %asmresult1, 3
+  %xor610 = xor i1 %cmp4, %xor9
+  %cond = select i1 %xor610, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 1 ^ CC == 2 ^ CC == 3.
+define signext range(i32 0, 43) i32 @foo_123(i32 noundef signext %x) {
+; CHECK-LABEL: foo_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bner %r14
+; CHECK-NEXT:  .LBB9_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -1
+  %xor9 = icmp ult i32 %2, 2
+  %cmp4 = icmp eq i32 %asmresult1, 3
+  %xor610 = xor i1 %cmp4, %xor9
+  %cond = select i1 %xor610, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test Flag Output Operands with 14 combinations of CCMASK and optimizations.
+; These test cases are from Heiko's kernel code and cover 14 variations of CCMask for XOR.
+
+ at a = dso_local global i32 0, align 4
+
+; Test CC == 0 ^ CC == 1.
+define range(i64 5, 9) i64 @fu_01() {
+; CHECK-LABEL: fu_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB10_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor4.i = icmp samesign ugt i32 %0, 1
+  %. = select i1 %xor4.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 ^ CC == 2.
+define range(i64 5, 9) i64 @fu_02() {
+; CHECK-LABEL: fu_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB11_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 0 ^ CC == 3.
+define range(i64 5, 9) i64 @fu_03() {
+; CHECK-LABEL: fu_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB12_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp ne i32 %0, 0
+  %2 = icmp ne i32 %0, 3
+  %tobool.not = and i1 %cmp.i, %2
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 1 ^ CC == 2.
+define range(i64 5, 9) i64 @fu_12() {
+; CHECK-LABEL: fu_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB13_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -3
+  %xor4.i = icmp ult i32 %2, -2
+  %. = select i1 %xor4.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 1 ^ CC == 3.
+define range(i64 5, 9) i64 @fu_13() {
+; CHECK-LABEL: fu_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB14_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not.not.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 2 ^ CC == 3.
+define range(i64 5, 9) i64 @fu_23() {
+; CHECK-LABEL: fu_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB15_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor4.i = icmp samesign ult i32 %0, 2
+  %. = select i1 %xor4.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 ^ CC == 1 ^ CC == 2.
+define range(i64 5, 9) i64 @fu_012() {
+; CHECK-LABEL: fu_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB16_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor59.i.not = icmp eq i32 %0, 3
+  %. = select i1 %xor59.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 ^ CC == 1 ^ CC == 3.
+define range(i64 5, 9) i64 @fu_013() {
+; CHECK-LABEL: fu_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB17_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor8.i = icmp samesign ugt i32 %0, 1
+  %2 = icmp ne i32 %0, 3
+  %tobool.not = and i1 %xor8.i, %2
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 ^ CC == 2 ^ CC == 3.
+define range(i64 5, 9) i64 @fu_023() {
+; CHECK-LABEL: fu_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB18_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp eq i32 %0, 0
+  %cmp1.i = icmp eq i32 %0, 2
+  %xor8.i = xor i1 %cmp.i, %cmp1.i
+  %2 = icmp ne i32 %0, 3
+  %tobool.not = xor i1 %2, %xor8.i
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 1 ^ CC == 2 ^ CC == 3.
+define range(i64 5, 9) i64 @fu_123() {
+; CHECK-LABEL: fu_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB19_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %tobool.not = icmp eq i32 %0, 0
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test Flag Output Operands with 14 combinations of CCMASK and optimizations.
+; These test cases are from Heiko's kernel code and cover 14 variations of CCMask for XOR.
+
+declare void @dummy()
+
+; Test CC == 0 ^ CC == 1.
+define void @bar_01() {
+; CHECK-LABEL: bar_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgle dummy at PLT
+; CHECK-NEXT:  .LBB20_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor4.i = icmp samesign ugt i32 %0, 1
+  br i1 %xor4.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 ^ CC == 2.
+define void @bar_02() {
+; CHECK-LABEL: bar_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jghe dummy at PLT
+; CHECK-NEXT:  .LBB21_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  br i1 %tobool.not.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 ^ CC == 3.
+define void @bar_03() {
+; CHECK-LABEL: bar_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnlh dummy at PLT
+; CHECK-NEXT:  .LBB22_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  switch i32 %0, label %if.end [
+    i32 3, label %if.then
+    i32 0, label %if.then
+  ]
+
+if.then:                                          ; preds = %entry, %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %if.then
+  ret void
+}
+
+; Test CC == 1 ^ CC == 2.
+define void @bar_12() {
+; CHECK-LABEL: bar_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jglh dummy@PLT
+; CHECK-NEXT:  .LBB23_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -3
+  %xor4.i = icmp ult i32 %2, -2
+  br i1 %xor4.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 1 ^ CC == 3.
+define void @bar_13() {
+; CHECK-LABEL: bar_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnhe dummy@PLT
+; CHECK-NEXT:  .LBB24_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not.not = icmp eq i32 %2, 0
+  br i1 %tobool.not.not.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 2 ^ CC == 3.
+define void @bar_23() {
+; CHECK-LABEL: bar_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnle dummy@PLT
+; CHECK-NEXT:  .LBB25_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor4.i = icmp samesign ult i32 %0, 2
+  br i1 %xor4.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 ^ CC == 1 ^ CC == 2.
+define void @bar_012() {
+; CHECK-LABEL: bar_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy@PLT
+; CHECK-NEXT:  .LBB26_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor59.i.not = icmp eq i32 %0, 3
+  br i1 %xor59.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 ^ CC == 1 ^ CC == 3.
+define void @bar_013() {
+; CHECK-LABEL: bar_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy@PLT
+; CHECK-NEXT:  .LBB27_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %switch = icmp eq i32 %0, 2
+  br i1 %switch, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %if.then
+  ret void
+}
+
+; Test CC == 0 ^ CC == 2 ^ CC == 3.
+define void @bar_023() {
+; CHECK-LABEL: bar_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnl dummy@PLT
+; CHECK-NEXT:  .LBB28_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp eq i32 %0, 0
+  %cmp1.i = icmp eq i32 %0, 2
+  %xor8.i = xor i1 %cmp.i, %cmp1.i
+  %2 = icmp ne i32 %0, 3
+  %tobool.not = xor i1 %2, %xor8.i
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 1 ^ CC == 2 ^ CC == 3.
+define void @bar_123() {
+; CHECK-LABEL: bar_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    larl %r1, a
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy@PLT
+; CHECK-NEXT:  .LBB29_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %tobool.not = icmp eq i32 %0, 0
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor_eq_noteq.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor_eq_noteq.ll
new file mode 100644
index 0000000000000..bdbe5ff3ae924
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor_eq_noteq.ll
@@ -0,0 +1,1083 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; Test Flag Output Operands with 14 combinations of CCMASK and optimizations
+; for XOR across three different functions, including two test cases from
+; Heiko's kernel code. This test checks combinations of the EQUAL (==) and
+; NOT EQUAL (!=) operators, e.g. CC == 0 ^ CC != 1 ^ CC != 2 and
+; CC == 0 ^ CC == 2 ^ CC != 3. A rough C-level sketch of the foo_* pattern
+; follows the RUN line below.
+
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
+
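+; A rough C-level sketch only (hypothetical source, assuming the "=@cc"
+; constraint from this patch) of the foo_* cases below:
+;
+;   int cc;
+;   asm("ahi %0,42" : "+d"(x), "=@cc"(cc));
+;   return ((cc == 0) ^ (cc != 1)) ? 42 : 0;   /* foo_01; others vary the masks */
+;
+; Each test exercises one ==/!= combination and expects it to collapse into a
+; single CC-mask compare-and-return sequence.
+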
+; Test CC == 0 ^ CC != 1.
+define signext range(i32 0, 43) i32 @foo_01(i32 noundef signext %x) {
+; CHECK-LABEL: foo_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB0_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5 = icmp samesign ugt i32 %asmresult1, 1
+  %cond = select i1 %xor5, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 0 ^ CC != 2.
+define signext range(i32 0, 43) i32 @foo_02(i32 noundef signext %x) {
+; CHECK-LABEL: foo_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB1_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %asmresult1, 1
+  %tobool.not = icmp eq i32 %2, 0
+  %cond = select i1 %tobool.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC == 0 ^ CC != 3.
+define signext range(i32 0, 43) i32 @foo_03(i32 noundef signext %x) {
+; CHECK-LABEL: foo_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB2_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp ne i32 %asmresult1, 0
+  %cmp2 = icmp ne i32 %asmresult1, 3
+  %xor5 = and i1 %cmp, %cmp2
+  %cond = select i1 %xor5, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 1 ^ CC != 2.
+define signext range(i32 0, 43) i32 @foo_12(i32 noundef signext %x) {
+; CHECK-LABEL: foo_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB3_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -3
+  %xor5 = icmp ult i32 %2, -2
+  %cond = select i1 %xor5, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 1 ^ CC != 3.
+define signext range(i32 0, 43) i32 @foo_13(i32 noundef signext %x) {
+; CHECK-LABEL: foo_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB4_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %asmresult1, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  %cond = select i1 %tobool.not.not, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 2 ^ CC != 3.
+define signext range(i32 0, 43) i32 @foo_23(i32 noundef signext %x) {
+; CHECK-LABEL: foo_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB5_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = or disjoint i32 %asmresult1, -4
+  %xor5 = icmp samesign ult i32 %2, -2
+  %cond = select i1 %xor5, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 0 ^ CC != 1 ^ CC != 2.
+define signext range(i32 0, 43) i32 @foo_012(i32 noundef signext %x) {
+; CHECK-LABEL: foo_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB6_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor610.not = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %xor610.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC == 0 ^ CC != 1 ^ CC != 3.
+define signext range(i32 0, 43) i32 @foo_013(i32 noundef signext %x) {
+; CHECK-LABEL: foo_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnhr %r14
+; CHECK-NEXT:  .LBB7_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor9 = icmp samesign ugt i32 %asmresult1, 1
+  %cmp4 = icmp ne i32 %asmresult1, 3
+  %xor610 = xor i1 %xor9, %cmp4
+  %cond = select i1 %xor610, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 0 ^ CC != 2 ^ CC != 3.
+define signext range(i32 0, 43) i32 @foo_023(i32 noundef signext %x) {
+; CHECK-LABEL: foo_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB8_1: # %entry
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = trunc i32 %asmresult1 to i1
+  %3 = icmp eq i32 %asmresult1, 3
+  %tobool.not = xor i1 %3, %2
+  %cond = select i1 %tobool.not, i32 0, i32 42
+  ret i32 %cond
+}
+
+; Test CC == 1 ^ CC != 2 ^ CC != 3.
+define signext range(i32 0, 43) i32 @foo_123(i32 noundef signext %x) {
+; CHECK-LABEL: foo_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bner %r14
+; CHECK-NEXT:  .LBB9_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -3
+  %xor9 = icmp ult i32 %2, -2
+  %cmp4 = icmp ne i32 %asmresult1, 3
+  %xor610 = xor i1 %cmp4, %xor9
+  %cond = select i1 %xor610, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 0 ^ CC == 1 ^ CC != 2.
+define signext range(i32 0, 43) i32 @foo1_012(i32 noundef signext %x) {
+; CHECK-LABEL: foo1_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB10_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor610 = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %xor610, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 0 ^ CC == 1 ^ CC != 3.
+define signext range(i32 0, 43) i32 @foo1_013(i32 noundef signext %x) {
+; CHECK-LABEL: foo1_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB11_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor9 = icmp samesign ugt i32 %asmresult1, 1
+  %cmp4 = icmp ne i32 %asmresult1, 3
+  %xor610 = and i1 %xor9, %cmp4
+  %cond = select i1 %xor610, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 0 ^ CC == 2 ^ CC != 3.
+define signext range(i32 0, 43) i32 @foo1_023(i32 noundef signext %x) {
+; CHECK-LABEL: foo1_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB12_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp eq i32 %asmresult1, 0
+  %cmp2 = icmp eq i32 %asmresult1, 2
+  %xor9 = xor i1 %cmp, %cmp2
+  %cmp4 = icmp ne i32 %asmresult1, 3
+  %xor610 = xor i1 %cmp4, %xor9
+  %cond = select i1 %xor610, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC == 1 ^ CC == 2 ^ CC != 3.
+define signext range(i32 0, 43) i32 @foo1_123(i32 noundef signext %x) {
+; CHECK-LABEL: foo1_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB13_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor610 = icmp eq i32 %asmresult1, 0
+  %cond = select i1 %xor610, i32 42, i32 0
+  ret i32 %cond
+}
+
+
+@a = global i32 0, align 4
+
+; Test CC == 0 ^ CC != 1.
+define range(i64 5, 9) i64 @fu_01() {
+; CHECK-LABEL: fu_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB14_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor4.i = icmp samesign ult i32 %0, 2
+  %. = select i1 %xor4.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 ^ CC != 2.
+define range(i64 5, 9) i64 @fu_02() {
+; CHECK-LABEL: fu_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB15_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 ^ CC != 3.
+define range(i64 5, 9) i64 @fu_03() {
+; CHECK-LABEL: fu_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB16_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp eq i32 %0, 0
+  %cmp1.i = icmp eq i32 %0, 3
+  %xor4.i.not = or i1 %cmp.i, %cmp1.i
+  %. = select i1 %xor4.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 1 ^ CC != 2.
+define range(i64 5, 9) i64 @fu_12() {
+; CHECK-LABEL: fu_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB17_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -1
+  %xor4.i = icmp ult i32 %2, 2
+  %. = select i1 %xor4.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 1 ^ CC != 3.
+define range(i64 5, 9) i64 @fu_13() {
+; CHECK-LABEL: fu_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB18_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 2 ^ CC != 3.
+define range(i64 5, 9) i64 @fu_23() {
+; CHECK-LABEL: fu_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB19_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = or disjoint i32 %0, -4
+  %xor4.i = icmp samesign ugt i32 %2, -3
+  %. = select i1 %xor4.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 ^ CC != 1 ^ CC != 2.
+define range(i64 5, 9) i64 @fu_012() {
+; CHECK-LABEL: fu_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB20_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor59.i.not = icmp eq i32 %0, 3
+  %. = select i1 %xor59.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 ^ CC != 1 ^ CC != 3.
+define range(i64 5, 9) i64 @fu_013() {
+; CHECK-LABEL: fu_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB21_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor8.i = icmp samesign ugt i32 %0, 1
+  %2 = icmp ne i32 %0, 3
+  %tobool.not = and i1 %xor8.i, %2
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 ^ CC != 2 ^ CC != 3.
+define range(i64 5, 9) i64 @fu_023() {
+; CHECK-LABEL: fu_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB22_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = trunc i32 %0 to i1
+  %3 = icmp eq i32 %0, 3
+  %tobool.not = xor i1 %3, %2
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 1 ^ CC != 2 ^ CC != 3.
+define range(i64 5, 9) i64 @fu_123() {
+; CHECK-LABEL: fu_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB23_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %tobool.not = icmp eq i32 %0, 0
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 ^ CC == 1 ^ CC != 2.
+define range(i64 5, 9) i64 @fu1_012() {
+; CHECK-LABEL: fu1_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB24_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor59.i.not = icmp eq i32 %0, 3
+  %. = select i1 %xor59.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 0 ^ CC == 1 ^ CC != 3.
+define range(i64 5, 9) i64 @fu1_013() {
+; CHECK-LABEL: fu1_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnhr %r14
+; CHECK-NEXT:  .LBB25_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor8.i = icmp samesign ult i32 %0, 2
+  %cmp3.i = icmp eq i32 %0, 3
+  %xor59.i.not = or i1 %xor8.i, %cmp3.i
+  %. = select i1 %xor59.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 0 ^ CC == 2 ^ CC != 3.
+define range(i64 5, 9) i64 @fu1_023() {
+; CHECK-LABEL: fu1_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlr %r14
+; CHECK-NEXT:  .LBB26_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp eq i32 %0, 0
+  %cmp1.i = icmp eq i32 %0, 2
+  %xor8.i = xor i1 %cmp.i, %cmp1.i
+  %2 = icmp eq i32 %0, 3
+  %tobool.not = xor i1 %2, %xor8.i
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC == 1 ^ CC == 2 ^ CC != 3.
+define range(i64 5, 9) i64 @fu1_123() {
+; CHECK-LABEL: fu1_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB27_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor59.i.not = icmp eq i32 %0, 0
+  %. = select i1 %xor59.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC == 0 ^ CC != 1.
+define void @bar_01() {
+; CHECK-LABEL: bar_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgle dummy@PLT
+; CHECK-NEXT:  .LBB28_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor4.i = icmp samesign ugt i32 %0, 1
+  br i1 %xor4.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+declare void @dummy() local_unnamed_addr #1
+
+; Test CC == 0 ^ CC != 2.
+define void @bar_02() {
+; CHECK-LABEL: bar_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jghe dummy@PLT
+; CHECK-NEXT:  .LBB29_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  br i1 %tobool.not.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 ^ CC != 3.
+define void @bar_03() {
+; CHECK-LABEL: bar_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jglh dummy@PLT
+; CHECK-NEXT:  .LBB30_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  switch i32 %0, label %if.then [
+    i32 3, label %if.end
+    i32 0, label %if.end
+  ]
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %entry, %if.then
+  ret void
+}
+
+; Test CC == 1 ^ CC != 2.
+define void @bar_12() {
+; CHECK-LABEL: bar_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnlh dummy@PLT
+; CHECK-NEXT:  .LBB31_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -1
+  %xor4.i = icmp ult i32 %2, 2
+  br i1 %xor4.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 1 ^ CC != 3.
+define void @bar_13() {
+; CHECK-LABEL: bar_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jghe dummy@PLT
+; CHECK-NEXT:  .LBB32_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  br i1 %tobool.not.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 2 ^ CC != 3.
+define void @bar_23() {
+; CHECK-LABEL: bar_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgle dummy@PLT
+; CHECK-NEXT:  .LBB33_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = or disjoint i32 %0, -4
+  %xor4.i = icmp samesign ugt i32 %2, -3
+  br i1 %xor4.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 ^ CC != 1 ^ CC != 2.
+define void @bar_012() {
+; CHECK-LABEL: bar_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgno dummy@PLT
+; CHECK-NEXT:  .LBB34_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor59.i.not = icmp eq i32 %0, 3
+  br i1 %xor59.i.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 ^ CC != 1 ^ CC != 3.
+define void @bar_013() {
+; CHECK-LABEL: bar_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnh dummy@PLT
+; CHECK-NEXT:  .LBB35_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %switch = icmp eq i32 %0, 2
+  br i1 %switch, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %if.then
+  ret void
+}
+
+; Test CC == 0 ^ CC != 2 ^ CC != 3.
+define void @bar_023() {
+; CHECK-LABEL: bar_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnl dummy@PLT
+; CHECK-NEXT:  .LBB36_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = trunc i32 %0 to i1
+  %3 = icmp eq i32 %0, 3
+  %tobool.not = xor i1 %3, %2
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 1 ^ CC != 2 ^ CC != 3.
+define void @bar_123() {
+; CHECK-LABEL: bar_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgne dummy@PLT
+; CHECK-NEXT:  .LBB37_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %tobool.not = icmp eq i32 %0, 0
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 ^ CC == 1 ^ CC != 2.
+define void @bar1_012() {
+; CHECK-LABEL: bar1_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgo dummy@PLT
+; CHECK-NEXT:  .LBB38_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor59.i.not = icmp eq i32 %0, 3
+  br i1 %xor59.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 0 ^ CC == 1 ^ CC != 3.
+define void @bar1_013() {
+; CHECK-LABEL: bar1_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgh dummy@PLT
+; CHECK-NEXT:  .LBB39_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %switch = icmp eq i32 %0, 2
+  br i1 %switch, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %if.then
+  ret void
+}
+
+; Test CC == 0 ^ CC == 2 ^ CC != 3.
+define void @bar1_023() {
+; CHECK-LABEL: bar1_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgl dummy@PLT
+; CHECK-NEXT:  .LBB40_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp eq i32 %0, 0
+  %cmp1.i = icmp eq i32 %0, 2
+  %xor8.i = xor i1 %cmp.i, %cmp1.i
+  %2 = icmp eq i32 %0, 3
+  %tobool.not = xor i1 %2, %xor8.i
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC == 1 ^ CC == 2 ^ CC != 3.
+define void @bar1_123() {
+; CHECK-LABEL: bar1_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy@PLT
+; CHECK-NEXT:  .LBB41_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor59.i.not = icmp eq i32 %0, 0
+  br i1 %xor59.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor_not.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor_not.ll
new file mode 100644
index 0000000000000..47410d28f80e3
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor_not.ll
@@ -0,0 +1,778 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; Test Flag Output Operands with 14 combinations of CCMASK and optimizations
+; for XOR across three different functions, including two test cases from
+; Heiko's kernel code. This test checks the NOT EQUAL (!=) operator, e.g.
+; CC != 0 ^ CC != 1 ^ CC != 2. A worked expansion of the first combination
+; follows the RUN line below.
+
+; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
+
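+; Worked expansion for the first case below, under the CC-in-[0,3] assumption
+; established by the llvm.assume: (CC != 0) ^ (CC != 1) evaluates to 1 ^ 1 = 0
+; for CC in {2,3} and to 1 for CC in {0,1}, so the whole condition collapses to
+; the single mask test CC <= 1, which appears as "bler" / "jgle" in the checks.
+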
+; Test CC != 0 ^ CC != 1.
+define signext range(i32 0, 43) i32 @foo_01(i32 noundef signext %x) {
+; CHECK-LABEL: foo_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB0_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5 = icmp samesign ult i32 %asmresult1, 2
+  %cond = select i1 %xor5, i32 42, i32 0
+  ret i32 %cond
+}
+
+declare void @llvm.assume(i1 noundef) #1
+
+; Test CC != 0 ^ CC != 2.
+define signext range(i32 0, 43) i32 @foo_02(i32 noundef signext %x) {
+; CHECK-LABEL: foo_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB1_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp ne i32 %asmresult1, 0
+  %cmp2 = icmp ne i32 %asmresult1, 2
+  %xor5 = xor i1 %cmp, %cmp2
+  %cond = select i1 %xor5, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC != 0 ^ CC != 3.
+define signext range(i32 0, 43) i32 @foo_03(i32 noundef signext %x) {
+; CHECK-LABEL: foo_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB2_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp ne i32 %asmresult1, 0
+  %cmp2 = icmp ne i32 %asmresult1, 3
+  %xor5 = xor i1 %cmp, %cmp2
+  %cond = select i1 %xor5, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC != 1 ^ CC != 2.
+define signext range(i32 0, 43) i32 @foo_12(i32 noundef signext %x) {
+; CHECK-LABEL: foo_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB3_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %asmresult1, -1
+  %xor5 = icmp ult i32 %2, 2
+  %cond = select i1 %xor5, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC != 1 ^ CC != 3.
+define signext range(i32 0, 43) i32 @foo_13(i32 noundef signext %x) {
+; CHECK-LABEL: foo_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnher %r14
+; CHECK-NEXT:  .LBB4_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp ne i32 %asmresult1, 1
+  %cmp2 = icmp ne i32 %asmresult1, 3
+  %xor5 = xor i1 %cmp, %cmp2
+  %cond = select i1 %xor5, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC != 2 ^ CC != 3.
+define signext range(i32 0, 43) i32 @foo_23(i32 noundef signext %x) {
+; CHECK-LABEL: foo_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB5_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor5 = icmp samesign ugt i32 %asmresult1, 1
+  %cond = select i1 %xor5, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC != 0 ^ CC != 1 ^ CC != 2.
+define signext range(i32 0, 43) i32 @foo_012(i32 noundef signext %x) {
+; CHECK-LABEL: foo_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB6_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor610 = icmp eq i32 %asmresult1, 3
+  %cond = select i1 %xor610, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC != 0 ^ CC != 1 ^ CC != 3.
+define signext range(i32 0, 43) i32 @foo_013(i32 noundef signext %x) {
+; CHECK-LABEL: foo_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bhr %r14
+; CHECK-NEXT:  .LBB7_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor9 = icmp samesign ugt i32 %asmresult1, 1
+  %cmp4 = icmp ne i32 %asmresult1, 3
+  %xor610 = and i1 %xor9, %cmp4
+  %cond = select i1 %xor610, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC != 0 ^ CC != 2 ^ CC != 3.
+define signext range(i32 0, 43) i32 @foo_023(i32 noundef signext %x) {
+; CHECK-LABEL: foo_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    blr %r14
+; CHECK-NEXT:  .LBB8_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp = icmp ne i32 %asmresult1, 0
+  %cmp2 = icmp ne i32 %asmresult1, 2
+  %xor9 = xor i1 %cmp, %cmp2
+  %cmp4 = icmp ne i32 %asmresult1, 3
+  %xor610 = xor i1 %cmp4, %xor9
+  %cond = select i1 %xor610, i32 42, i32 0
+  ret i32 %cond
+}
+
+; Test CC != 1 ^ CC != 2 ^ CC != 3.
+define signext range(i32 0, 43) i32 @foo_123(i32 noundef signext %x) {
+; CHECK-LABEL: foo_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    ahi %r2, 42
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB9_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
+  %asmresult1 = extractvalue { i32, i32 } %0, 1
+  %1 = icmp ult i32 %asmresult1, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor610 = icmp eq i32 %asmresult1, 0
+  %cond = select i1 %xor610, i32 42, i32 0
+  ret i32 %cond
+}
+
+
+@a = global i32 0, align 4
+
+; Test CC != 0 ^ CC != 1.
+define range(i64 5, 9) i64 @fu_01() {
+; CHECK-LABEL: fu_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnler %r14
+; CHECK-NEXT:  .LBB10_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor4.i = icmp samesign ugt i32 %0, 1
+  %. = select i1 %xor4.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 0 ^ CC != 2.
+define range(i64 5, 9) i64 @fu_02() {
+; CHECK-LABEL: fu_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB11_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC != 0 ^ CC != 3.
+define range(i64 5, 9) i64 @fu_03() {
+; CHECK-LABEL: fu_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    blhr %r14
+; CHECK-NEXT:  .LBB12_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp ne i32 %0, 0
+  %2 = icmp ne i32 %0, 3
+  %tobool.not = and i1 %cmp.i, %2
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 1 ^ CC != 2.
+define range(i64 5, 9) i64 @fu_12() {
+; CHECK-LABEL: fu_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlhr %r14
+; CHECK-NEXT:  .LBB13_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -3
+  %xor4.i = icmp ult i32 %2, -2
+  %. = select i1 %xor4.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 1 ^ CC != 3.
+define range(i64 5, 9) i64 @fu_13() {
+; CHECK-LABEL: fu_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB14_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not.not = icmp eq i32 %2, 0
+  %. = select i1 %tobool.not.not.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 2 ^ CC != 3.
+define range(i64 5, 9) i64 @fu_23() {
+; CHECK-LABEL: fu_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bler %r14
+; CHECK-NEXT:  .LBB15_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor4.i = icmp samesign ult i32 %0, 2
+  %. = select i1 %xor4.i, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 0 ^ CC != 1 ^ CC != 2.
+define range(i64 5, 9) i64 @fu_012() {
+; CHECK-LABEL: fu_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB16_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor59.i.not = icmp eq i32 %0, 3
+  %. = select i1 %xor59.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC != 0 ^ CC != 1 ^ CC != 3.
+define range(i64 5, 9) i64 @fu_013() {
+; CHECK-LABEL: fu_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnhr %r14
+; CHECK-NEXT:  .LBB17_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor8.i = icmp samesign ult i32 %0, 2
+  %cmp3.i = icmp eq i32 %0, 3
+  %xor59.i.not = or i1 %xor8.i, %cmp3.i
+  %. = select i1 %xor59.i.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 0 ^ CC != 2 ^ CC != 3.
+define range(i64 5, 9) i64 @fu_023() {
+; CHECK-LABEL: fu_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a@GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    bnlr %r14
+; CHECK-NEXT:  .LBB18_1: # %entry
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp ne i32 %0, 0
+  %cmp1.i = icmp ne i32 %0, 2
+  %xor8.i = xor i1 %cmp.i, %cmp1.i
+  %2 = icmp eq i32 %0, 3
+  %tobool.not = xor i1 %2, %xor8.i
+  %. = select i1 %tobool.not, i64 8, i64 5
+  ret i64 %.
+}
+
+; Test CC != 1 ^ CC != 2 ^ CC != 3.
+define range(i64 5, 9) i64 @fu_123() {
+; CHECK-LABEL: fu_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    lghi %r2, 5
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB19_1: # %entry
+; CHECK-NEXT:    lghi %r2, 8
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #2
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor59.i.not = icmp eq i32 %0, 0
+  %. = select i1 %xor59.i.not, i64 5, i64 8
+  ret i64 %.
+}
+
+; Test CC != 0 ^ CC != 1.
+define void @bar_01() {
+; CHECK-LABEL: bar_01:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgle dummy at PLT
+; CHECK-NEXT:  .LBB20_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor4.i = icmp samesign ugt i32 %0, 1
+  br i1 %xor4.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+declare void @dummy() local_unnamed_addr #1
+
+; Test CC != 0 ^ CC != 2.
+define void @bar_02() {
+; CHECK-LABEL: bar_02:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jghe dummy at PLT
+; CHECK-NEXT:  .LBB21_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not = icmp eq i32 %2, 0
+  br i1 %tobool.not.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC != 0 ^ CC != 3.
+define void @bar_03() {
+; CHECK-LABEL: bar_03:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnlh dummy at PLT
+; CHECK-NEXT:  .LBB22_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  switch i32 %0, label %if.end [
+    i32 3, label %if.then
+    i32 0, label %if.then
+  ]
+
+if.then:                                          ; preds = %entry, %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %if.then
+  ret void
+}
+
+; Test CC != 1 ^ CC != 2.
+define void @bar_12() {
+; CHECK-LABEL: bar_12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jglh dummy at PLT
+; CHECK-NEXT:  .LBB23_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = add nsw i32 %0, -3
+  %xor4.i = icmp ult i32 %2, -2
+  br i1 %xor4.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC != 1 ^ CC != 3.
+define void @bar_13() {
+; CHECK-LABEL: bar_13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnhe dummy at PLT
+; CHECK-NEXT:  .LBB24_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %2 = and i32 %0, 1
+  %tobool.not.not.not = icmp eq i32 %2, 0
+  br i1 %tobool.not.not.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC != 2 ^ CC != 3.
+define void @bar_23() {
+; CHECK-LABEL: bar_23:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgnle dummy at PLT
+; CHECK-NEXT:  .LBB25_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor4.i = icmp samesign ult i32 %0, 2
+  br i1 %xor4.i, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC != 0 ^ CC != 1 ^ CC != 2.
+define void @bar_012() {
+; CHECK-LABEL: bar_012:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgo dummy at PLT
+; CHECK-NEXT:  .LBB26_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor59.i.not = icmp eq i32 %0, 3
+  br i1 %xor59.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC != 0 ^ CC != 1 ^ CC != 3.
+define void @bar_013() {
+; CHECK-LABEL: bar_013:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgh dummy at PLT
+; CHECK-NEXT:  .LBB27_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %switch = icmp eq i32 %0, 2
+  br i1 %switch, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %entry, %if.then
+  ret void
+}
+
+; Test CC != 0 ^ CC != 2 ^ CC != 3.
+define void @bar_023() {
+; CHECK-LABEL: bar_023:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jgl dummy at PLT
+; CHECK-NEXT:  .LBB28_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %cmp.i = icmp ne i32 %0, 0
+  %cmp1.i = icmp ne i32 %0, 2
+  %xor8.i = xor i1 %cmp.i, %cmp1.i
+  %2 = icmp eq i32 %0, 3
+  %tobool.not = xor i1 %2, %xor8.i
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; Test CC != 1 ^ CC != 2 ^ CC != 3.
+define void @bar_123() {
+; CHECK-LABEL: bar_123:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lgrl %r1, a at GOT
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    alsi 0(%r1), -1
+; CHECK-EMPTY:
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    jge dummy at PLT
+; CHECK-NEXT:  .LBB29_1: # %if.end
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
+  %1 = icmp ult i32 %0, 4
+  tail call void @llvm.assume(i1 %1)
+  %xor59.i.not = icmp eq i32 %0, 0
+  br i1 %xor59.i.not, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  tail call void @dummy() #3
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+
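
For orientation, the IR tests above all follow the same source pattern:
decrement a global with ALSI (ADD LOGICAL WITH SIGNED IMMEDIATE) and test
combinations of the captured condition code. For a logical add, CC 0 means
zero with no carry, 1 nonzero with no carry, 2 zero with carry, and 3
nonzero with carry. A minimal C sketch of that pattern (function name
illustrative, constraint spelling follows the IR above):

    extern int a;

    /* Illustrative only: decrement the global and classify the condition
       code captured by the "=@cc" flag output, a value in [0, 4). */
    static long classify_dec(void) {
      int cc;
      asm volatile("alsi %[mem],-1\n"
                   : "=@cc"(cc), [mem] "+QS"(a)
                   :
                   : "memory");
      return (cc == 0 || cc == 3) ? 8 : 5; /* any CC combination works here */
    }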

>From d3f3c036d31af0e9da6cc8d1382e582d022e5087 Mon Sep 17 00:00:00 2001
From: anoopkg6 <anoopkg6 at github.com>
Date: Fri, 7 Feb 2025 01:29:12 +0100
Subject: [PATCH 02/12] Remove the triple check in CGStmt.cpp and use StringRef
 in SystemZ.h.

---
 clang/include/clang/Basic/TargetInfo.h |  5 +++++
 clang/lib/Basic/Targets/SystemZ.cpp    |  2 +-
 clang/lib/Basic/Targets/SystemZ.h      |  2 ++
 clang/lib/CodeGen/CGStmt.cpp           | 10 +++++-----
 4 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h
index 25eda907d20a7..4d40936676268 100644
--- a/clang/include/clang/Basic/TargetInfo.h
+++ b/clang/include/clang/Basic/TargetInfo.h
@@ -1212,6 +1212,11 @@ class TargetInfo : public TransferrableTargetInfo,
                              std::string &/*SuggestedModifier*/) const {
     return true;
   }
+
+  // CC is binary on most targets. SystemZ overrides this, as its CC
+  // interval is [0, 4).
+  virtual unsigned getFlagOutputCCUpperBound() const { return 2; }
+
   virtual bool
   validateAsmConstraint(const char *&Name,
                         TargetInfo::ConstraintInfo &info) const = 0;
diff --git a/clang/lib/Basic/Targets/SystemZ.cpp b/clang/lib/Basic/Targets/SystemZ.cpp
index 49f88b45220d0..e6be8b7563892 100644
--- a/clang/lib/Basic/Targets/SystemZ.cpp
+++ b/clang/lib/Basic/Targets/SystemZ.cpp
@@ -92,7 +92,7 @@ bool SystemZTargetInfo::validateAsmConstraint(
     return true;
   case '@':
     // CC condition changes.
-    if (strlen(Name) >= 3 && *(Name + 1) == 'c' && *(Name + 2) == 'c') {
+    if (!StringRef("@cc").compare(Name)) {
       Name += 2;
       Info.setAllowsRegister();
       return true;
diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index a6909ababdec0..fc23a0bcd6234 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -114,6 +114,8 @@ class LLVM_LIBRARY_VISIBILITY SystemZTargetInfo : public TargetInfo {
     return RegName == "r15";
   }
 
+  // CC has interval [0, 4).
+  unsigned getFlagOutputCCUpperBound() const override { return 4; }
   bool validateAsmConstraint(const char *&Name,
                              TargetInfo::ConstraintInfo &info) const override;
 
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 27f7bb6528958..de91d894872a2 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -2563,11 +2563,11 @@ EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
     if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
       // Target must guarantee the Value `Tmp` here is lowered to a boolean
       // value.
-      unsigned CCUpperBound = 2;
-      if (CGF.getTarget().getTriple().getArch() == llvm::Triple::systemz) {
-        // On this target CC value can be in range [0, 3].
-        CCUpperBound = 4;
-      }
+      // Lower 'Tmp' as 'icmp ult %Tmp, CCUpperBound'. On some targets
+      // CCUpperBound is not binary; for SystemZ it is 4, since the CC
+      // interval is [0, 4). With this range known, the llvm.assume intrinsic
+      // guides the optimizer to generate better IR. Verified for SystemZ.
+      unsigned CCUpperBound = CGF.getTarget().getFlagOutputCCUpperBound();
       llvm::Constant *CCUpperBoundConst =
           llvm::ConstantInt::get(Tmp->getType(), CCUpperBound);
       llvm::Value *IsBooleanValue =
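
A small illustration of what the target-provided upper bound buys (assuming
a SystemZ compiler with this change; names are illustrative): the flag
output is stored behind an 'icmp ult' against CCUpperBound that feeds
llvm.assume, so a test that is impossible for a value in [0, 4) should
simply fold away.

    /* Illustrative only: with the [0, 4) range assumption attached to the
       flag output, the comparison below is provably false and is expected
       to optimize to 'return 0'. */
    int cc_out_of_range(int x) {
      int cc;
      asm("ahi %[x],42\n" : [x] "+d"(x), "=@cc"(cc));
      return cc >= 4;
    }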

>From 062e03aa7db7611a0a6d30b735abde8de4edbc4a Mon Sep 17 00:00:00 2001
From: anoopkg6 <anoopkg6 at github.com>
Date: Fri, 7 Feb 2025 17:02:30 +0100
Subject: [PATCH 03/12] Fix clang test causing PR build failure.

---
 .../CodeGen/inline-asm-systemz-flag-output.c  | 144 ++----------------
 1 file changed, 14 insertions(+), 130 deletions(-)

diff --git a/clang/test/CodeGen/inline-asm-systemz-flag-output.c b/clang/test/CodeGen/inline-asm-systemz-flag-output.c
index ab90e031df1f2..68ba0dad26bbc 100644
--- a/clang/test/CodeGen/inline-asm-systemz-flag-output.c
+++ b/clang/test/CodeGen/inline-asm-systemz-flag-output.c
@@ -1,149 +1,33 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
-// RUN: %clang_cc1 -triple s390x-linux -emit-llvm -o - %s | FileCheck %s
-// CHECK-LABEL: define dso_local signext i32 @foo_012(
-// CHECK-SAME: i32 noundef signext [[X:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-NEXT:  [[ENTRY:.*]]:
-// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    [[CC:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    store i32 [[X]], ptr [[X_ADDR]], align 4
-// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X_ADDR]], align 4
-// CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 [[TMP0]]) #[[ATTR2:[0-9]+]], !srcloc [[META2:![0-9]+]]
-// CHECK-NEXT:    [[ASMRESULT:%.*]] = extractvalue { i32, i32 } [[TMP1]], 0
-// CHECK-NEXT:    [[ASMRESULT1:%.*]] = extractvalue { i32, i32 } [[TMP1]], 1
-// CHECK-NEXT:    store i32 [[ASMRESULT]], ptr [[X_ADDR]], align 4
-// CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i32 [[ASMRESULT1]], 4
-// CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
-// CHECK-NEXT:    store i32 [[ASMRESULT1]], ptr [[CC]], align 4
-// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[CC]], align 4
-// CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP3]], 0
-// CHECK-NEXT:    br i1 [[CMP]], label %[[LOR_END:.*]], label %[[LOR_LHS_FALSE:.*]]
-// CHECK:       [[LOR_LHS_FALSE]]:
-// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[CC]], align 4
-// CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[TMP4]], 1
-// CHECK-NEXT:    br i1 [[CMP2]], label %[[LOR_END]], label %[[LOR_RHS:.*]]
-// CHECK:       [[LOR_RHS]]:
-// CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[CC]], align 4
-// CHECK-NEXT:    [[CMP3:%.*]] = icmp eq i32 [[TMP5]], 2
-// CHECK-NEXT:    br label %[[LOR_END]]
-// CHECK:       [[LOR_END]]:
-// CHECK-NEXT:    [[TMP6:%.*]] = phi i1 [ true, %[[LOR_LHS_FALSE]] ], [ true, %[[ENTRY]] ], [ [[CMP3]], %[[LOR_RHS]] ]
-// CHECK-NEXT:    [[TMP7:%.*]] = zext i1 [[TMP6]] to i64
-// CHECK-NEXT:    [[COND:%.*]] = select i1 [[TMP6]], i32 42, i32 0
-// CHECK-NEXT:    ret i32 [[COND]]
-//
+// RUN: %clang_cc1 -O2 -triple s390x-linux -emit-llvm -o - %s | FileCheck %s
+
 int foo_012(int x) {
+// CHECK-LABEL: @foo_012
+// CHECK: = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x)
   int cc;
-  asm volatile ("ahi %[x],42\n" : [x] "+d"(x), "=@cc" (cc));
+  asm ("ahi %[x],42\n" : [x] "+d"(x), "=@cc" (cc));
   return cc == 0 || cc == 1 || cc == 2 ? 42 : 0;
 }
 
-// CHECK-LABEL: define dso_local signext i32 @foo_013(
-// CHECK-SAME: i32 noundef signext [[X:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*]]:
-// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    [[CC:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    store i32 [[X]], ptr [[X_ADDR]], align 4
-// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X_ADDR]], align 4
-// CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 [[TMP0]]) #[[ATTR2]], !srcloc [[META3:![0-9]+]]
-// CHECK-NEXT:    [[ASMRESULT:%.*]] = extractvalue { i32, i32 } [[TMP1]], 0
-// CHECK-NEXT:    [[ASMRESULT1:%.*]] = extractvalue { i32, i32 } [[TMP1]], 1
-// CHECK-NEXT:    store i32 [[ASMRESULT]], ptr [[X_ADDR]], align 4
-// CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i32 [[ASMRESULT1]], 4
-// CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
-// CHECK-NEXT:    store i32 [[ASMRESULT1]], ptr [[CC]], align 4
-// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[CC]], align 4
-// CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP3]], 0
-// CHECK-NEXT:    br i1 [[CMP]], label %[[LOR_END:.*]], label %[[LOR_LHS_FALSE:.*]]
-// CHECK:       [[LOR_LHS_FALSE]]:
-// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[CC]], align 4
-// CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[TMP4]], 1
-// CHECK-NEXT:    br i1 [[CMP2]], label %[[LOR_END]], label %[[LOR_RHS:.*]]
-// CHECK:       [[LOR_RHS]]:
-// CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[CC]], align 4
-// CHECK-NEXT:    [[CMP3:%.*]] = icmp eq i32 [[TMP5]], 3
-// CHECK-NEXT:    br label %[[LOR_END]]
-// CHECK:       [[LOR_END]]:
-// CHECK-NEXT:    [[TMP6:%.*]] = phi i1 [ true, %[[LOR_LHS_FALSE]] ], [ true, %[[ENTRY]] ], [ [[CMP3]], %[[LOR_RHS]] ]
-// CHECK-NEXT:    [[TMP7:%.*]] = zext i1 [[TMP6]] to i64
-// CHECK-NEXT:    [[COND:%.*]] = select i1 [[TMP6]], i32 42, i32 0
-// CHECK-NEXT:    ret i32 [[COND]]
-//
 int foo_013(int x) {
+// CHECK-LABEL: @foo_013
+// CHECK: = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x)
   int cc;
-  asm volatile ("ahi %[x],42\n" : [x] "+d"(x), "=@cc" (cc));
+  asm ("ahi %[x],42\n" : [x] "+d"(x), "=@cc" (cc));
   return cc == 0 || cc == 1 || cc == 3 ? 42 : 0;
 }
 
-// CHECK-LABEL: define dso_local signext i32 @foo_023(
-// CHECK-SAME: i32 noundef signext [[X:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*]]:
-// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    [[CC:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    store i32 [[X]], ptr [[X_ADDR]], align 4
-// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X_ADDR]], align 4
-// CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 [[TMP0]]) #[[ATTR2]], !srcloc [[META4:![0-9]+]]
-// CHECK-NEXT:    [[ASMRESULT:%.*]] = extractvalue { i32, i32 } [[TMP1]], 0
-// CHECK-NEXT:    [[ASMRESULT1:%.*]] = extractvalue { i32, i32 } [[TMP1]], 1
-// CHECK-NEXT:    store i32 [[ASMRESULT]], ptr [[X_ADDR]], align 4
-// CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i32 [[ASMRESULT1]], 4
-// CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
-// CHECK-NEXT:    store i32 [[ASMRESULT1]], ptr [[CC]], align 4
-// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[CC]], align 4
-// CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP3]], 0
-// CHECK-NEXT:    br i1 [[CMP]], label %[[LOR_END:.*]], label %[[LOR_LHS_FALSE:.*]]
-// CHECK:       [[LOR_LHS_FALSE]]:
-// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[CC]], align 4
-// CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[TMP4]], 2
-// CHECK-NEXT:    br i1 [[CMP2]], label %[[LOR_END]], label %[[LOR_RHS:.*]]
-// CHECK:       [[LOR_RHS]]:
-// CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[CC]], align 4
-// CHECK-NEXT:    [[CMP3:%.*]] = icmp eq i32 [[TMP5]], 3
-// CHECK-NEXT:    br label %[[LOR_END]]
-// CHECK:       [[LOR_END]]:
-// CHECK-NEXT:    [[TMP6:%.*]] = phi i1 [ true, %[[LOR_LHS_FALSE]] ], [ true, %[[ENTRY]] ], [ [[CMP3]], %[[LOR_RHS]] ]
-// CHECK-NEXT:    [[TMP7:%.*]] = zext i1 [[TMP6]] to i64
-// CHECK-NEXT:    [[COND:%.*]] = select i1 [[TMP6]], i32 42, i32 0
-// CHECK-NEXT:    ret i32 [[COND]]
-//
 int foo_023(int x) {
+// CHECK-LABEL: @foo_023
+// CHECK: = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x)
   int cc;
-  asm volatile ("ahi %[x],42\n" : [x] "+d"(x), "=@cc" (cc));
+  asm ("ahi %[x],42\n" : [x] "+d"(x), "=@cc" (cc));
   return cc == 0 || cc == 2 || cc == 3 ? 42 : 0;
 }
 
-// CHECK-LABEL: define dso_local signext i32 @foo_123(
-// CHECK-SAME: i32 noundef signext [[X:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*]]:
-// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    [[CC:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    store i32 [[X]], ptr [[X_ADDR]], align 4
-// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X_ADDR]], align 4
-// CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 [[TMP0]]) #[[ATTR2]], !srcloc [[META5:![0-9]+]]
-// CHECK-NEXT:    [[ASMRESULT:%.*]] = extractvalue { i32, i32 } [[TMP1]], 0
-// CHECK-NEXT:    [[ASMRESULT1:%.*]] = extractvalue { i32, i32 } [[TMP1]], 1
-// CHECK-NEXT:    store i32 [[ASMRESULT]], ptr [[X_ADDR]], align 4
-// CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i32 [[ASMRESULT1]], 4
-// CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
-// CHECK-NEXT:    store i32 [[ASMRESULT1]], ptr [[CC]], align 4
-// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[CC]], align 4
-// CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP3]], 1
-// CHECK-NEXT:    br i1 [[CMP]], label %[[LOR_END:.*]], label %[[LOR_LHS_FALSE:.*]]
-// CHECK:       [[LOR_LHS_FALSE]]:
-// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[CC]], align 4
-// CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[TMP4]], 2
-// CHECK-NEXT:    br i1 [[CMP2]], label %[[LOR_END]], label %[[LOR_RHS:.*]]
-// CHECK:       [[LOR_RHS]]:
-// CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[CC]], align 4
-// CHECK-NEXT:    [[CMP3:%.*]] = icmp eq i32 [[TMP5]], 3
-// CHECK-NEXT:    br label %[[LOR_END]]
-// CHECK:       [[LOR_END]]:
-// CHECK-NEXT:    [[TMP6:%.*]] = phi i1 [ true, %[[LOR_LHS_FALSE]] ], [ true, %[[ENTRY]] ], [ [[CMP3]], %[[LOR_RHS]] ]
-// CHECK-NEXT:    [[TMP7:%.*]] = zext i1 [[TMP6]] to i64
-// CHECK-NEXT:    [[COND:%.*]] = select i1 [[TMP6]], i32 42, i32 0
-// CHECK-NEXT:    ret i32 [[COND]]
-//
 int foo_123(int x) {
+// CHECK-LABEL: @foo_123
+// CHECK: = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x)
   int cc;
-  asm volatile ("ahi %[x],42\n" : [x] "+d"(x), "=@cc" (cc));
+  asm ("ahi %[x],42\n" : [x] "+d"(x), "=@cc" (cc));
   return cc == 1 || cc == 2 || cc == 3 ? 42 : 0;
 }

>From 5aef5641cb3b509a97b558ba7b619252fed421ec Mon Sep 17 00:00:00 2001
From: anoopkg6 <anoopkg6 at github.com>
Date: Mon, 10 Feb 2025 19:46:58 +0100
Subject: [PATCH 04/12] Add preprocessor test for flag output operand and some
 cleanup of C string handling.

---
 clang/lib/Basic/Targets/SystemZ.h                | 5 +++--
 clang/test/Preprocessor/systemz_asm_flag_outut.c | 4 ++++
 2 files changed, 7 insertions(+), 2 deletions(-)
 create mode 100644 clang/test/Preprocessor/systemz_asm_flag_outut.c

diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index fc23a0bcd6234..d5823381dc9c3 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -120,8 +120,9 @@ class LLVM_LIBRARY_VISIBILITY SystemZTargetInfo : public TargetInfo {
                              TargetInfo::ConstraintInfo &info) const override;
 
   std::string convertConstraint(const char *&Constraint) const override {
-    if (strncmp(Constraint, "@cc", 3) == 0) {
-      std::string Converted = "{" + std::string(Constraint, 3) + "}";
+    if (llvm::StringRef(Constraint).starts_with("@cc")) {
+      std::string Converted =
+          std::string("{") + std::string("@cc") + std::string("}");
       Constraint += 3;
       return Converted;
     }
diff --git a/clang/test/Preprocessor/systemz_asm_flag_outut.c b/clang/test/Preprocessor/systemz_asm_flag_outut.c
new file mode 100644
index 0000000000000..b627499d5ce46
--- /dev/null
+++ b/clang/test/Preprocessor/systemz_asm_flag_outut.c
@@ -0,0 +1,4 @@
+// RUN: %clang -target systemz-unknown-unknown -x c -E -dM -o - %s | FileCheck -match-full-lines %s
+// RUN: %clang -target s390x-unknown-unknown -x c -E -dM -o - %s | FileCheck -match-full-lines %s
+
+// CHECK: #define __GCC_ASM_FLAG_OUTPUTS__ 1
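
Because __GCC_ASM_FLAG_OUTPUTS__ is now defined for SystemZ (that is what
the new preprocessor test checks), portable code can gate its use of the
constraint on the macro. A hedged sketch; the function name and the plain-C
fallback are illustrative:

    /* Illustrative only: use the "=@cc" flag output when available,
       otherwise fall back to plain C. */
    #ifdef __GCC_ASM_FLAG_OUTPUTS__
    static int add42_cc(int *x) {
      int cc;
      asm("ahi %[v],42\n" : [v] "+d"(*x), "=@cc"(cc));
      return cc;
    }
    #else
    static int add42_cc(int *x) {
      *x += 42;
      return -1; /* condition code not captured */
    }
    #endif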

>From 2ab7a75c10a366a4b693be15dc84d253569807e8 Mon Sep 17 00:00:00 2001
From: anoopkg6 <anoopkg6 at github.com>
Date: Tue, 11 Feb 2025 19:23:17 +0100
Subject: [PATCH 05/12] Fix clang emitting an extra byte '\7F' in the clang
 test and fix a warning in SystemZISelLowering.cpp.

---
 clang/lib/Basic/Targets/SystemZ.h               | 5 +++--
 llvm/lib/Target/SystemZ/SystemZISelLowering.cpp | 4 ++++
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index d5823381dc9c3..fc2584142a78e 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -121,9 +121,10 @@ class LLVM_LIBRARY_VISIBILITY SystemZTargetInfo : public TargetInfo {
 
   std::string convertConstraint(const char *&Constraint) const override {
     if (llvm::StringRef(Constraint).starts_with("@cc")) {
+      auto Len = llvm::StringRef("@cc").size();
       std::string Converted =
-          std::string("{") + std::string("@cc") + std::string("}");
-      Constraint += 3;
+          std::string("{") + std::string(Constraint, Len) + std::string("}");
+      Constraint += Len - 1;
       return Converted;
     }
     switch (Constraint[0]) {
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 259da48a3b223..aff2db95d494b 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -8012,6 +8012,10 @@ static bool combineCCIPMMask(SDValue &CCReg, int &CCValid, int &CCMask) {
           NewCCMask = NewCCMask1;
         else if (isSRL_IPM_CCSequence(AndOp2.getNode()) && IsOp2)
           NewCCMask = NewCCMask2;
+        else {
+          CCValid = RestoreCCValid;
+          return false;
+        }
         // Bit 29 set => CC == 2 || CC == 3.
         if ((NewCCMask & 0x3) == 2)
           NewCCMask = SystemZ::CCMASK_2 | SystemZ::CCMASK_3;
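
For context on the srl/ipm pairs this code matches: IPM deposits the
condition code in bits 29:28 of its 32-bit result and zeroes bits 31:30,
and the backend extracts it with a logical shift right by 28
(SystemZ::IPM_CC). A small C sketch of the value being reasoned about
(not part of the patch):

    /* Illustrative only: recover the 2-bit condition code from an IPM
       result. */
    static unsigned cc_from_ipm(unsigned ipm_result) {
      unsigned cc = ipm_result >> 28; /* cc is in [0, 4) */
      /* Bit 29 of ipm_result set => cc == 2 || cc == 3, the case the
         "Bit 29 set" comment above refers to; bit 28 set => cc is odd. */
      return cc;
    }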

>From fccc70e04fa764c7adf9f00dfb624b356b12406a Mon Sep 17 00:00:00 2001
From: anoopkg6 <anoopkg6 at github.com>
Date: Thu, 13 Feb 2025 14:39:55 +0100
Subject: [PATCH 06/12] Incorporate suggestions from review.

---
 clang/include/clang/Basic/TargetInfo.h        | 16 ++--
 clang/lib/Basic/Targets/SystemZ.cpp           |  1 +
 clang/lib/Basic/Targets/SystemZ.h             |  7 +-
 clang/lib/CodeGen/CGStmt.cpp                  | 14 +++-
 .../SelectionDAG/SelectionDAGBuilder.cpp      | 34 +++-----
 .../Target/SystemZ/SystemZISelLowering.cpp    | 80 ++++++++++---------
 6 files changed, 80 insertions(+), 72 deletions(-)

diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h
index 648980f68895f..d4a18ce01f6bc 100644
--- a/clang/include/clang/Basic/TargetInfo.h
+++ b/clang/include/clang/Basic/TargetInfo.h
@@ -1114,10 +1114,12 @@ class TargetInfo : public TransferrableTargetInfo,
 
     std::string ConstraintStr;  // constraint: "=rm"
     std::string Name;           // Operand name: [foo] with no []'s.
+    unsigned FlagOutputCCUpperBound;
+
   public:
     ConstraintInfo(StringRef ConstraintStr, StringRef Name)
         : Flags(0), TiedOperand(-1), ConstraintStr(ConstraintStr.str()),
-          Name(Name.str()) {
+          Name(Name.str()), FlagOutputCCUpperBound(2) {
       ImmRange.Min = ImmRange.Max = 0;
       ImmRange.isConstrained = false;
     }
@@ -1188,6 +1190,14 @@ class TargetInfo : public TransferrableTargetInfo,
       TiedOperand = N;
       // Don't copy Name or constraint string.
     }
+
+    // CC range can be set by target. SystemZ sets it to 4. It is 2 by default.
+    void setFlagOutputCCUpperBound(unsigned CCBound) {
+      FlagOutputCCUpperBound = CCBound;
+    }
+    unsigned getFlagOutputCCUpperBound() const {
+      return FlagOutputCCUpperBound;
+    }
   };
 
   /// Validate register name used for global register variables.
@@ -1229,10 +1239,6 @@ class TargetInfo : public TransferrableTargetInfo,
     return true;
   }
 
-  // CC is binary on most targets. SystemZ overrides this, as its CC
-  // interval is [0, 4).
-  virtual unsigned getFlagOutputCCUpperBound() const { return 2; }
-
   virtual bool
   validateAsmConstraint(const char *&Name,
                         TargetInfo::ConstraintInfo &info) const = 0;
diff --git a/clang/lib/Basic/Targets/SystemZ.cpp b/clang/lib/Basic/Targets/SystemZ.cpp
index e848c04530908..c7725a8bd54a0 100644
--- a/clang/lib/Basic/Targets/SystemZ.cpp
+++ b/clang/lib/Basic/Targets/SystemZ.cpp
@@ -103,6 +103,7 @@ bool SystemZTargetInfo::validateAsmConstraint(
     if (!StringRef("@cc").compare(Name)) {
       Name += 2;
       Info.setAllowsRegister();
+      Info.setFlagOutputCCUpperBound(4);
       return true;
     }
     return false;
diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index abd7fb4566860..6db148dc45db8 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -115,16 +115,13 @@ class LLVM_LIBRARY_VISIBILITY SystemZTargetInfo : public TargetInfo {
     return RegName == "r15";
   }
 
-  // CC has interval [0, 4).
-  unsigned getFlagOutputCCUpperBound() const override { return 4; }
   bool validateAsmConstraint(const char *&Name,
                              TargetInfo::ConstraintInfo &info) const override;
 
   std::string convertConstraint(const char *&Constraint) const override {
-    if (llvm::StringRef(Constraint).starts_with("@cc")) {
+    if (llvm::StringRef(Constraint) == "@cc") {
       auto Len = llvm::StringRef("@cc").size();
-      std::string Converted =
-          std::string("{") + std::string(Constraint, Len) + std::string("}");
+      std::string Converted = std::string("{@cc}");
       Constraint += Len - 1;
       return Converted;
     }
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 41ee1939d6b34..2055de97a5864 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -2624,8 +2624,18 @@ EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
       // Lower 'Tmp' as 'icmp ult %Tmp, CCUpperBound'. On some targets
       // CCUpperBound is not binary; for SystemZ it is 4, since the CC
       // interval is [0, 4). With this range known, the llvm.assume intrinsic
-      // guides the optimizer to generate better IR. Verified for SystemZ.
-      unsigned CCUpperBound = CGF.getTarget().getFlagOutputCCUpperBound();
+      // guides the optimizer to generate better IR in most cases, as
+      // observed for select_cc in the SystemZ unit tests for flag output
+      // operands. For some br_cc cases the generated IR was suboptimal,
+      // e.g. a switch table for simple comparison terms.
+      StringRef Name;
+      if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
+        Name = GAS->getOutputName(i);
+      TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
+      bool IsValid = CGF.getTarget().validateOutputConstraint(Info);
+      (void)IsValid;
+      assert(IsValid && "Failed to parse flag output operand constraint");
+      unsigned CCUpperBound = Info.getFlagOutputCCUpperBound();
       llvm::Constant *CCUpperBoundConst =
           llvm::ConstantInt::get(Tmp->getType(), CCUpperBound);
       llvm::Value *IsBooleanValue =
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index c7e1e89fd53fe..dbb9ac74962be 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2838,11 +2838,6 @@ void SelectionDAGBuilder::visitBr(const BranchInst &I) {
     else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
       Opcode = Instruction::Or;
     auto &TLI = DAG.getTargetLoweringInfo();
-    bool BrSrlIPM = FuncInfo.MF->getTarget().getTargetTriple().getArch() ==
-                    Triple::ArchType::systemz;
-    // For Flag output operands SRL/IPM sequence, we want to avoid
-    // creating switch case, as it creates Basic Block and inhibits
-    // optimization in DAGCombiner for flag output operands.
     const auto checkSRLIPM = [&TLI](const SDValue &Op) {
       if (!Op.getNumOperands())
         return false;
@@ -2859,13 +2854,15 @@ void SelectionDAGBuilder::visitBr(const BranchInst &I) {
       }
       return false;
     };
-    if (BrSrlIPM) {
-      if (NodeMap.count(BOp0) && NodeMap[BOp0].getNode()) {
-        BrSrlIPM &= checkSRLIPM(getValue(BOp0));
-        if (NodeMap.count(BOp1) && NodeMap[BOp1].getNode())
-          BrSrlIPM &= checkSRLIPM(getValue(BOp1));
-      } else
-        BrSrlIPM = false;
+    // The incoming IR here is straight-line code. FindMergedConditions splits
+    // the condition code sequence across basic blocks, and DAGCombiner cannot
+    // combine across basic blocks. Identify the SRL/IPM/CC sequence for
+    // SystemZ and avoid the transformation in FindMergedConditions.
+    bool BrSrlIPM = false;
+    if (NodeMap.count(BOp0) && NodeMap[BOp0].getNode()) {
+      BrSrlIPM |= checkSRLIPM(getValue(BOp0));
+      if (NodeMap.count(BOp1) && NodeMap[BOp1].getNode())
+        BrSrlIPM &= checkSRLIPM(getValue(BOp1));
     }
     if (Opcode && !BrSrlIPM &&
         !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
@@ -12141,20 +12138,15 @@ void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
       const APInt &SmallValue = Small.Low->getValue();
       const APInt &BigValue = Big.Low->getValue();
 
-      // Creating switch cases optimizing tranformation inhibits DAGCombiner
-      // for SystemZ for flag output operands. DAGCobiner compute cumulative
-      // CCMask for flag output operands SRL/IPM sequence, we want to avoid
-      // creating switch case, as it creates Basic Block and inhibits
-      // optimization in DAGCombiner for flag output operands.
-      // cases like (CC == 0) || (CC == 2) || (CC == 3), or
+      // The incoming IR is a switch table. Identify the SRL/IPM/CC sequence
+      // for SystemZ and avoid splitting the condition code sequence across
+      // basic blocks for cases like (CC == 0) || (CC == 2) || (CC == 3), or
       // (CC == 0) || (CC == 1) ^ (CC == 3), there could potentially be
       // more cases like this.
       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
       bool IsSrlIPM = false;
       if (NodeMap.count(Cond) && NodeMap[Cond].getNode())
-        IsSrlIPM = CurMF->getTarget().getTargetTriple().getArch() ==
-                       Triple::ArchType::systemz &&
-                   TLI.canLowerSRL_IPM_Switch(getValue(Cond));
+        IsSrlIPM = TLI.canLowerSRL_IPM_Switch(getValue(Cond));
       // Check that there is only one bit different.
       APInt CommonBit = BigValue ^ SmallValue;
       if (CommonBit.isPowerOf2() || IsSrlIPM) {
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 0ae5d6a20e83f..94ae61d6d992c 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -1482,6 +1482,14 @@ SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
   return TargetLowering::getConstraintType(Constraint);
 }
 
+// Convert condition code in CCReg to an i32 value.
+static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg) {
+  SDLoc DL(CCReg);
+  SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);
+  return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
+                     DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
+}
+
 TargetLowering::ConstraintWeight SystemZTargetLowering::
 getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                const char *constraint) const {
@@ -1717,12 +1725,7 @@ SDValue SystemZTargetLowering::LowerAsmOutputForConstraint(
     Chain = Glue.getValue(1);
   } else
     Glue = DAG.getCopyFromReg(Chain, DL, SystemZ::CC, MVT::i32);
-
-  SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
-  SDValue CC = DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
-                           DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
-
-  return CC;
+  return getCCResult(DAG, Glue);
 }
 
 void SystemZTargetLowering::LowerAsmOperandForConstraint(
@@ -5227,14 +5230,6 @@ SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
                                  Node->getMemoryVT(), Node->getMemOperand());
 }
 
-// Convert condition code in CCReg to an i32 value.
-static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg) {
-  SDLoc DL(CCReg);
-  SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);
-  return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
-                     DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
-}
-
 SDValue
 SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
                                               SelectionDAG &DAG) const {
@@ -8082,7 +8077,7 @@ SDValue SystemZTargetLowering::combineBSWAP(
 }
 
 // Combine IPM sequence for flag output operands.
-static bool combineCCIPMMask(SDValue &CCReg, int &CCValid, int &CCMask) {
+static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
   // Convert CCVal to CCMask and update it along with  CCValid.
   const auto convertCCValToCCMask = [&CCMask, &CCValid](int CCVal) {
     bool Invert = false;
@@ -8121,6 +8116,8 @@ static bool combineCCIPMMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     if (!IPMOp0 || IPMOp0->getNumOperands() < 2)
       return false;
     auto *RN = dyn_cast<RegisterSDNode>(IPMOp0->getOperand(1));
+    // Check that operand 1 is SystemZ::CC. This also rules out
+    // srl/ipm/tbegin and srl/ipm/tend sequences.
     if (!RN || !RN->getReg().isPhysical() || RN->getReg() != SystemZ::CC)
       return false;
     // Return the updated CCReg link.
@@ -8177,7 +8174,7 @@ static bool combineCCIPMMask(SDValue &CCReg, int &CCValid, int &CCMask) {
           return false;
         int CCValidVal = CCValid1->getZExtValue();
         int CCMaskVal = CCMask1->getZExtValue();
-        if (combineCCIPMMask(XORReg, CCValidVal, CCMaskVal)) {
+        if (combineSRL_IPM_CCMask(XORReg, CCValidVal, CCMaskVal)) {
           // CC == 0 || CC == 2 for bit 28 Test Under Mask.
           CCMask = SystemZ::CCMASK_CMP_GE;
           CCMask ^= CCMaskVal;
@@ -8217,7 +8214,7 @@ static bool combineCCIPMMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     int CCValidVal = CCValidNode->getZExtValue();
     int CCMaskVal = CCMaskNode->getZExtValue();
     SDValue CCRegOp = CCNode->getOperand(4);
-    if (combineCCIPMMask(CCRegOp, CCValidVal, CCMaskVal) ||
+    if (combineSRL_IPM_CCMask(CCRegOp, CCValidVal, CCMaskVal) ||
         isCCOperand(CCRegOp.getNode())) {
       CCMask = CCMaskVal;
       CCValid = SystemZ::CCMASK_ANY;
@@ -8252,8 +8249,8 @@ static bool combineCCIPMMask(SDValue &CCReg, int &CCValid, int &CCMask) {
       int CCMaskVal2 = CCMask2->getZExtValue();
       SDValue CCReg1 = XOROp1->getOperand(4);
       SDValue CCReg2 = XOROp2->getOperand(4);
-      if (!combineCCIPMMask(CCReg1, CCValidVal1, CCMaskVal1) ||
-          !combineCCIPMMask(CCReg2, CCValidVal2, CCMaskVal2))
+      if (!combineSRL_IPM_CCMask(CCReg1, CCValidVal1, CCMaskVal1) ||
+          !combineSRL_IPM_CCMask(CCReg2, CCValidVal2, CCMaskVal2))
         return false;
       CCMask = CCMaskVal1 ^ CCMaskVal2;
       CCReg = CCReg1;
@@ -8280,8 +8277,8 @@ static bool combineCCIPMMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     SDValue CmpOp2 = CCNode->getOperand(1);
     int CCValid1 = CCValid, CCValid2 = CCValid;
     int CCMask1 = CCMask, CCMask2 = CCMask;
-    bool IsOp1 = combineCCIPMMask(CmpOp1, CCValid1, CCMask1);
-    bool IsOp2 = combineCCIPMMask(CmpOp2, CCValid2, CCMask2);
+    bool IsOp1 = combineSRL_IPM_CCMask(CmpOp1, CCValid1, CCMask1);
+    bool IsOp2 = combineSRL_IPM_CCMask(CmpOp2, CCValid2, CCMask2);
     if (IsOp1 && IsOp2) {
       CCMask = CCMask1 ^ CCMask2;
       CCReg = CmpOp1;
@@ -8300,7 +8297,7 @@ static bool combineCCIPMMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     if (CCMask == SystemZ::CCMASK_CMP_NE)
       Invert = !Invert;
     SDValue NewCCReg = CCNode->getOperand(0);
-    if (combineCCIPMMask(NewCCReg, CCValid, CCMask)) {
+    if (combineSRL_IPM_CCMask(NewCCReg, CCValid, CCMask)) {
       CCMask |= Mask;
       if (Invert)
         CCMask ^= SystemZ::CCMASK_ANY;
@@ -8320,8 +8317,8 @@ static bool combineCCIPMMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     SDValue OrOp2 = LHS->getOperand(1);
     int NewCCMask1 = CCMask, NewCCMask2 = CCMask, NewCCMask = CCMask;
     if (!isa<ConstantSDNode>(OrOp1) && !isa<ConstantSDNode>(OrOp2)) {
-      bool IsOp1 = combineCCIPMMask(OrOp1, CCValid, NewCCMask1);
-      bool IsOp2 = combineCCIPMMask(OrOp2, CCValid, NewCCMask2);
+      bool IsOp1 = combineSRL_IPM_CCMask(OrOp1, CCValid, NewCCMask1);
+      bool IsOp2 = combineSRL_IPM_CCMask(OrOp2, CCValid, NewCCMask2);
       if (!IsOp1 && !IsOp2) {
         CCValid = RestoreCCValid;
         return false;
@@ -8364,8 +8361,8 @@ static bool combineCCIPMMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     int NewCCMask2 = CCMask;
     int NewCCMask;
     if (!isa<ConstantSDNode>(AndOp1) && !isa<ConstantSDNode>(AndOp2)) {
-      bool IsOp1 = combineCCIPMMask(AndOp1, CCValid, NewCCMask1);
-      bool IsOp2 = combineCCIPMMask(AndOp2, CCValid, NewCCMask2);
+      bool IsOp1 = combineSRL_IPM_CCMask(AndOp1, CCValid, NewCCMask1);
+      bool IsOp2 = combineSRL_IPM_CCMask(AndOp2, CCValid, NewCCMask2);
       if (!IsOp1 && !IsOp2) {
         CCValid = RestoreCCValid;
         return false;
@@ -8455,7 +8452,7 @@ static bool combineCCIPMMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     if (CCMask == SystemZ::CCMASK_CMP_NE)
       Invert = !Invert;
     // If both the operands are select_cc.
-    if (combineCCIPMMask(XORReg, CCValid, CCMask)) {
+    if (combineSRL_IPM_CCMask(XORReg, CCValid, CCMask)) {
       CCReg = XORReg;
       CCValid = SystemZ::CCMASK_ANY;
       return true;
@@ -8479,8 +8476,8 @@ static bool combineCCIPMMask(SDValue &CCReg, int &CCValid, int &CCMask) {
       SDValue XORReg1 = XOROp->getOperand(4);
       SDValue XORReg2 = LHS->getOperand(1);
       int CCMaskVal1 = CCMaskVal, CCMaskVal2 = CCMaskVal;
-      if (combineCCIPMMask(XORReg1, CCValidVal, CCMaskVal1) &&
-          combineCCIPMMask(XORReg2, CCValidVal, CCMaskVal2)) {
+      if (combineSRL_IPM_CCMask(XORReg1, CCValidVal, CCMaskVal1) &&
+          combineSRL_IPM_CCMask(XORReg2, CCValidVal, CCMaskVal2)) {
         CCMask = CCMaskVal1 ^ CCMaskVal2;
         CCReg = XORReg1;
         CCValid = SystemZ::CCMASK_ANY;
@@ -8493,6 +8490,17 @@ static bool combineCCIPMMask(SDValue &CCReg, int &CCValid, int &CCMask) {
 }
 
 static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
+  // combineSRL_IPM_CCMask tries to combine an srl/ipm/cc sequence, which
+  // appears to be generated only for flag output operands. The IPM node
+  // has the physical register SystemZ::CC as an operand and CCValid is 15.
+  if (combineSRL_IPM_CCMask(CCReg, CCValid, CCMask))
+    return true;
+
+  // Code for SELECT_CCMASK does not seem to include an ipm sequence.
+  // There is one case with sra/ipm that does not have SystemZ::CC as an
+  // operand. Test cases for sra/ipm are bcmp.ll, memcmp-01.ll and
+  // strcmp-01.ll; these tests have an sra/sll/ipm/clc sequence.
+
   // We have a SELECT_CCMASK or BR_CCMASK comparing the condition code
   // set by the CCReg instruction using the CCValid / CCMask masks,
   // If the CCReg instruction is itself a ICMP testing the condition
@@ -8545,7 +8553,6 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     CCReg = CompareLHS->getOperand(4);
     return true;
   }
-
   // Optimize the case where CompareRHS is (SRA (SHL (IPM))).
   if (CompareLHS->getOpcode() == ISD::SRA) {
     auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
@@ -8575,7 +8582,6 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     CCReg = IPM->getOperand(0);
     return true;
   }
-
   return false;
 }
 
@@ -8642,7 +8648,7 @@ SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
   SDValue CCReg = N->getOperand(4);
   SDValue CCRegOp = CCOpNode->getOperand(4);
   // Combine current select_cc.
-  if (combineCCIPMMask(CCReg, CCValid, CCMask)) {
+  if (combineSRL_IPM_CCMask(CCReg, CCValid, CCMask)) {
     if (InvertOp1)
       CCMask ^= SystemZ::CCMASK_ANY;
     // There are two scenarios here.
@@ -8652,7 +8658,7 @@ SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
     // SELECT_CCMASK. Check for isCCOperand. In this case we will not know
     // original CCMask, but if only one bit is set in CCMaskValOp, that means
     // original CCMask was SystemZ::CCMASK_CMP_EQ.
-    if (!combineCCIPMMask(CCRegOp, CCValidValOp, CCMaskValOp) &&
+    if (!combineSRL_IPM_CCMask(CCRegOp, CCValidValOp, CCMaskValOp) &&
         !isCCOperand(CCRegOp.getNode()))
       return std::nullopt;
     // If outer SELECT_CCMASK is CCMASK_CMP_EQ or single bit is set in
@@ -8722,9 +8728,7 @@ SDValue SystemZTargetLowering::combineBR_CCMASK(
   SDValue Chain = N->getOperand(0);
   SDValue CCReg = N->getOperand(4);
 
-  // combineCCIPMMask tries to combine srl/ipm sequence for flag output operand.
-  if (combineCCIPMMask(CCReg, CCValidVal, CCMaskVal) ||
-      combineCCMask(CCReg, CCValidVal, CCMaskVal))
+  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
     return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0),
                        Chain,
                        DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32),
@@ -8753,9 +8757,7 @@ SDValue SystemZTargetLowering::combineSELECT_CCMASK(
   int CCMaskVal = CCMask->getZExtValue();
   SDValue CCReg = N->getOperand(4);
 
-  // combineCCIPMMask tries to combine srl/ipm sequence for flag output operand.
-  if (combineCCIPMMask(CCReg, CCValidVal, CCMaskVal) ||
-      combineCCMask(CCReg, CCValidVal, CCMaskVal))
+  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
     return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
                        N->getOperand(0), N->getOperand(1),
                        DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32),
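
To make the merging concrete: the comments above about cases like
(CC == 0) || (CC == 2) || (CC == 3) correspond to source of roughly the
following shape. Keeping the whole condition in one basic block lets the
combiner fold it into a single masked conditional branch instead of a chain
of compares. Names are illustrative and the constraint spelling follows the
IR tests earlier in the series:

    extern void dummy(void);
    extern int a;

    /* Illustrative only: a three-value condition-code test that should
       lower to one conditional branch (taken unless CC == 1). */
    void call_unless_cc1(void) {
      int cc;
      asm volatile("alsi %[mem],-1\n"
                   : "=@cc"(cc), [mem] "+QS"(a)
                   :
                   : "memory");
      if (cc == 0 || cc == 2 || cc == 3)
        dummy();
    }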

>From 7ea0c5f7f62f5873f48108ae60097c95f23f33ac Mon Sep 17 00:00:00 2001
From: anoopkg6 <anoopkg6 at github.com>
Date: Tue, 25 Feb 2025 03:20:01 +0100
Subject: [PATCH 07/12] - Changes relating to ConstraintInfo for setting the
 flag output CC upper bound for all backends supporting flag output operands
 (X86, AArch64 and SystemZ). - Remove all target-specific changes from
 SelectionDAGBuilder.cpp. - Add getJumpConditionMergingParams for SystemZ to
 set the cost for merging srl/ipm/cc. - TODO: Handle the cases where
 simplifyBranchOnICmpChain creates a switch table while folding a branch on
 an And'd or Or'd chain of icmp instructions.

---
 clang/include/clang/Basic/TargetInfo.h        |  2 +-
 clang/lib/Basic/Targets/AArch64.cpp           |  1 +
 clang/lib/Basic/Targets/SystemZ.cpp           |  2 +-
 clang/lib/Basic/Targets/SystemZ.h             |  6 +-
 clang/lib/Basic/Targets/X86.cpp               |  1 +
 clang/lib/CodeGen/CGStmt.cpp                  | 16 ++---
 llvm/include/llvm/CodeGen/TargetLowering.h    |  3 -
 .../SelectionDAG/SelectionDAGBuilder.cpp      | 58 +++----------------
 .../CodeGen/SelectionDAG/TargetLowering.cpp   |  4 --
 .../Target/SystemZ/SystemZISelLowering.cpp    | 47 ++++++++++-----
 llvm/lib/Target/SystemZ/SystemZISelLowering.h |  6 +-
 .../SystemZ/flag_output_operand_ccand_not.ll  | 11 +++-
 .../flag_output_operand_ccmixed_eq_noteq.ll   | 45 ++++++++++++--
 .../flag_output_operand_ccmixed_not.ll        |  9 ++-
 .../SystemZ/flag_output_operand_ccor.ll       | 12 +++-
 .../SystemZ/flag_output_operand_ccxor.ll      | 14 +++--
 .../flag_output_operand_ccxor_eq_noteq.ll     |  9 ++-
 .../SystemZ/flag_output_operand_ccxor_not.ll  | 15 +++--
 18 files changed, 146 insertions(+), 115 deletions(-)

diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h
index d4a18ce01f6bc..15d7bd50aca25 100644
--- a/clang/include/clang/Basic/TargetInfo.h
+++ b/clang/include/clang/Basic/TargetInfo.h
@@ -1119,7 +1119,7 @@ class TargetInfo : public TransferrableTargetInfo,
   public:
     ConstraintInfo(StringRef ConstraintStr, StringRef Name)
         : Flags(0), TiedOperand(-1), ConstraintStr(ConstraintStr.str()),
-          Name(Name.str()), FlagOutputCCUpperBound(2) {
+          Name(Name.str()), FlagOutputCCUpperBound(0) {
       ImmRange.Min = ImmRange.Max = 0;
       ImmRange.isConstrained = false;
     }
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index fad8d773bfc52..7e33cba217884 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -1552,6 +1552,7 @@ bool AArch64TargetInfo::validateAsmConstraint(
     if (const unsigned Len = matchAsmCCConstraint(Name)) {
       Name += Len - 1;
       Info.setAllowsRegister();
+      Info.setFlagOutputCCUpperBound(2);
       return true;
     }
   }
diff --git a/clang/lib/Basic/Targets/SystemZ.cpp b/clang/lib/Basic/Targets/SystemZ.cpp
index c7725a8bd54a0..8f5bb70925d60 100644
--- a/clang/lib/Basic/Targets/SystemZ.cpp
+++ b/clang/lib/Basic/Targets/SystemZ.cpp
@@ -100,7 +100,7 @@ bool SystemZTargetInfo::validateAsmConstraint(
     return true;
   case '@':
     // CC condition changes.
-    if (!StringRef("@cc").compare(Name)) {
+    if (StringRef(Name) == "@cc") {
       Name += 2;
       Info.setAllowsRegister();
       Info.setFlagOutputCCUpperBound(4);
diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index 6db148dc45db8..26bdf65ebc2d5 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -120,10 +120,8 @@ class LLVM_LIBRARY_VISIBILITY SystemZTargetInfo : public TargetInfo {
 
   std::string convertConstraint(const char *&Constraint) const override {
     if (llvm::StringRef(Constraint) == "@cc") {
-      auto Len = llvm::StringRef("@cc").size();
-      std::string Converted = std::string("{@cc}");
-      Constraint += Len - 1;
-      return Converted;
+      Constraint += 2;
+      return std::string("{@cc}");
     }
     switch (Constraint[0]) {
     case 'p': // Keep 'p' constraint.
diff --git a/clang/lib/Basic/Targets/X86.cpp b/clang/lib/Basic/Targets/X86.cpp
index 84a05cec04e7f..e822b1ce8d089 100644
--- a/clang/lib/Basic/Targets/X86.cpp
+++ b/clang/lib/Basic/Targets/X86.cpp
@@ -1580,6 +1580,7 @@ bool X86TargetInfo::validateAsmConstraint(
     if (auto Len = matchAsmCCConstraint(Name)) {
       Name += Len - 1;
       Info.setAllowsRegister();
+      Info.setFlagOutputCCUpperBound(2);
       return true;
     }
     return false;
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 2055de97a5864..f7f322c163646 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -2601,7 +2601,7 @@ EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
               const llvm::ArrayRef<LValue> ResultRegDests,
               const llvm::ArrayRef<QualType> ResultRegQualTys,
               const llvm::BitVector &ResultTypeRequiresCast,
-              const llvm::BitVector &ResultRegIsFlagReg) {
+              const std::vector<unsigned> &ResultRegIsFlagReg) {
   CGBuilderTy &Builder = CGF.Builder;
   CodeGenModule &CGM = CGF.CGM;
   llvm::LLVMContext &CTX = CGF.getLLVMContext();
@@ -2628,14 +2628,7 @@ EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
       // observed for select_cc in the SystemZ unit tests for flag output
       // operands. For some br_cc cases the generated IR was suboptimal,
       // e.g. a switch table for simple comparison terms.
-      StringRef Name;
-      if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
-        Name = GAS->getOutputName(i);
-      TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
-      bool IsValid = CGF.getTarget().validateOutputConstraint(Info);
-      (void)IsValid;
-      assert(IsValid && "Failed to parse flag output operand constraint");
-      unsigned CCUpperBound = Info.getFlagOutputCCUpperBound();
+      unsigned CCUpperBound = ResultRegIsFlagReg[i];
       llvm::Constant *CCUpperBoundConst =
           llvm::ConstantInt::get(Tmp->getType(), CCUpperBound);
       llvm::Value *IsBooleanValue =
@@ -2766,7 +2759,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
   std::vector<llvm::Type *> ArgElemTypes;
   std::vector<llvm::Value*> Args;
   llvm::BitVector ResultTypeRequiresCast;
-  llvm::BitVector ResultRegIsFlagReg;
+  std::vector<unsigned> ResultRegIsFlagReg;
 
   // Keep track of inout constraints.
   std::string InOutConstraints;
@@ -2824,8 +2817,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
       ResultRegQualTys.push_back(QTy);
       ResultRegDests.push_back(Dest);
 
-      bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
-      ResultRegIsFlagReg.push_back(IsFlagReg);
+      ResultRegIsFlagReg.push_back(Info.getFlagOutputCCUpperBound());
 
       llvm::Type *Ty = ConvertTypeForMem(QTy);
       const bool RequiresCast = Info.allowsRegister() &&
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 305ef4c7bea4c..bbecc7a6ddaee 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -5107,9 +5107,6 @@ class TargetLowering : public TargetLoweringBase {
                                             std::vector<SDValue> &Ops,
                                             SelectionDAG &DAG) const;
 
-  // Lower switch statement for flag output operand with SRL/IPM Sequence.
-  virtual bool canLowerSRL_IPM_Switch(SDValue Cond) const;
-
   // Lower custom output constraints. If invalid, return SDValue().
   virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue,
                                               const SDLoc &DL,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index dbb9ac74962be..8bfe49020ee7b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2837,34 +2837,7 @@ void SelectionDAGBuilder::visitBr(const BranchInst &I) {
       Opcode = Instruction::And;
     else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
       Opcode = Instruction::Or;
-    auto &TLI = DAG.getTargetLoweringInfo();
-    const auto checkSRLIPM = [&TLI](const SDValue &Op) {
-      if (!Op.getNumOperands())
-        return false;
-      SDValue OpVal = Op.getOperand(0);
-      SDNode *N = OpVal.getNode();
-      if (N && N->getOpcode() == ISD::SRL)
-        return TLI.canLowerSRL_IPM_Switch(OpVal);
-      else if (N && OpVal.getNumOperands() &&
-               (N->getOpcode() == ISD::AND || N->getOpcode() == ISD::OR)) {
-        SDValue OpVal1 = OpVal.getOperand(0);
-        SDNode *N1 = OpVal1.getNode();
-        if (N1 && N1->getOpcode() == ISD::SRL)
-          return TLI.canLowerSRL_IPM_Switch(OpVal1);
-      }
-      return false;
-    };
-    // The incoming IR here is straight-line code. FindMergedConditions splits
-    // the condition code sequence across basic blocks, and DAGCombiner cannot
-    // combine across basic blocks. Identify the SRL/IPM/CC sequence for
-    // SystemZ and avoid the transformation in FindMergedConditions.
-    bool BrSrlIPM = false;
-    if (NodeMap.count(BOp0) && NodeMap[BOp0].getNode()) {
-      BrSrlIPM |= checkSRLIPM(getValue(BOp0));
-      if (NodeMap.count(BOp1) && NodeMap[BOp1].getNode())
-        BrSrlIPM &= checkSRLIPM(getValue(BOp1));
-    }
-    if (Opcode && !BrSrlIPM &&
+    if (Opcode &&
         !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
           match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value()))) &&
         !shouldKeepJumpConditionsTogether(
@@ -12138,36 +12111,19 @@ void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
       const APInt &SmallValue = Small.Low->getValue();
       const APInt &BigValue = Big.Low->getValue();
 
-      // Incoming IR is switch table.Identify SRL/IPM/CC sequence for SystemZ
-      // and we want to avoid splitting condition code sequence across basic
-      // block for cases like (CC == 0) || (CC == 2) || (CC == 3), or
-      // (CC == 0) || (CC == 1) ^ (CC == 3), there could potentially be
-      // more cases like this.
-      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-      bool IsSrlIPM = false;
-      if (NodeMap.count(Cond) && NodeMap[Cond].getNode())
-        IsSrlIPM = TLI.canLowerSRL_IPM_Switch(getValue(Cond));
       // Check that there is only one bit different.
       APInt CommonBit = BigValue ^ SmallValue;
-      if (CommonBit.isPowerOf2() || IsSrlIPM) {
+      if (CommonBit.isPowerOf2()) {
         SDValue CondLHS = getValue(Cond);
         EVT VT = CondLHS.getValueType();
         SDLoc DL = getCurSDLoc();
         SDValue Cond;
 
-        if (CommonBit.isPowerOf2()) {
-          SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
-                                   DAG.getConstant(CommonBit, DL, VT));
-          Cond = DAG.getSetCC(DL, MVT::i1, Or,
-                              DAG.getConstant(BigValue | SmallValue, DL, VT),
-                              ISD::SETEQ);
-        } else if (IsSrlIPM && BigValue == 3 && SmallValue == 0) {
-          SDValue SetCC =
-              DAG.getSetCC(DL, MVT::i32, CondLHS,
-                           DAG.getConstant(SmallValue, DL, VT), ISD::SETEQ);
-          Cond = DAG.getSetCC(DL, MVT::i32, SetCC,
-                              DAG.getConstant(BigValue, DL, VT), ISD::SETEQ);
-        }
+        SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
+                                 DAG.getConstant(CommonBit, DL, VT));
+        Cond = DAG.getSetCC(DL, MVT::i1, Or,
+                            DAG.getConstant(BigValue | SmallValue, DL, VT),
+                            ISD::SETEQ);
 
         // Update successor info.
         // Both Small and Big will jump to Small.BB, so we sum up the
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 288639beacba7..adfb96041c5c0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -5576,10 +5576,6 @@ const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
   return nullptr;
 }
 
-bool TargetLowering::canLowerSRL_IPM_Switch(SDValue Cond) const {
-  return false;
-}
-
 SDValue TargetLowering::LowerAsmOutputForConstraint(
     SDValue &Chain, SDValue &Glue, const SDLoc &DL,
     const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const {
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 94ae61d6d992c..f656401f45876 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -24,6 +24,7 @@
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/IntrinsicsS390.h"
+#include "llvm/IR/PatternMatch.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/KnownBits.h"
@@ -8694,23 +8695,37 @@ SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
   return std::nullopt;
 }
 
-bool SystemZTargetLowering::canLowerSRL_IPM_Switch(SDValue Cond) const {
-  auto *SRL = Cond.getNode();
-  if (!SRL || SRL->getOpcode() != ISD::SRL)
-    return false;
-  auto *SRLCount = dyn_cast<ConstantSDNode>(SRL->getOperand(1));
-  if (!SRLCount || SRLCount->getZExtValue() != SystemZ::IPM_CC)
-    return false;
-  auto *IPM = SRL->getOperand(0).getNode();
-  if (!IPM || IPM->getOpcode() != SystemZISD::IPM)
-    return false;
-  auto IPMOp0 = IPM->getOperand(0).getNode();
-  if (!IPMOp0 || IPMOp0->getNumOperands() < 2)
-    return false;
-  auto RN = dyn_cast<RegisterSDNode>(IPMOp0->getOperand(1));
-  if (!RN || !RN->getReg().isPhysical() || RN->getReg() != SystemZ::CC)
+// Cost of merging conditions versus splitting them into multiple branches.
+TargetLoweringBase::CondMergingParams
+SystemZTargetLowering::getJumpConditionMergingParams(Instruction::BinaryOps Opc,
+                                                     const Value *Lhs,
+                                                     const Value *Rhs) const {
+  const auto isFlagOutOpCC = [](const Value *V) {
+    using namespace llvm::PatternMatch;
+    const Value *RHSVal;
+    const APInt *RHSC;
+    if (const auto *I = dyn_cast<Instruction>(V)) {
+      if (match(I->getOperand(0), m_And(m_Value(RHSVal), m_APInt(RHSC))) ||
+          match(I, m_Cmp(m_Value(RHSVal), m_APInt(RHSC)))) {
+        if (const auto *CB = dyn_cast<CallBase>(RHSVal)) {
+          if (CB->isInlineAsm()) {
+            const InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
+            return IA &&
+                   IA->getConstraintString().find("{@cc}") != std::string::npos;
+          }
+        }
+      }
+    }
     return false;
-  return true;
+  };
+  // Pattern (ICmp %asm) or (ICmp (And %asm)).
+  // The longest dependency chain (ICmp, And) has cost 2, so CostThreshold or
+  // BaseCost can be set >= 2. If the instruction cost is <= CostThreshold,
+  // the conditionals are merged; otherwise they are split.
+  if (isFlagOutOpCC(Lhs) && isFlagOutOpCC(Rhs))
+    return {3, 0, -1};
+  // Default.
+  return {-1, -1, -1};
 }
 
 SDValue SystemZTargetLowering::combineBR_CCMASK(
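
For reference, a minimal sketch of the source shape this cost model targets: both
sides of the logical operator test the same "=@cc" flag output, so keeping them in
one basic block lets the SystemZ DAG combines fold the srl/ipm sequence into a
single branch on CC. The asm body, the "+Q" memory operand and the function names
below are illustrative only, not taken from the patch.

    extern int a;
    extern void dummy(void);

    void sketch(void) {
      int cc;
      // Sets CC and publishes it through the flag output operand.
      asm volatile("alsi %1,-1" : "=@cc"(cc), "+Q"(a) : : "memory");
      // The (icmp, and) chain has cost 2, below the returned threshold, so the
      // two conditions are emitted together instead of being split across
      // basic blocks by FindMergedConditions.
      if (cc != 0 && cc != 1)
        dummy();
    }
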
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index 7275c972ef6e8..70c742839b3bd 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -521,8 +521,10 @@ class SystemZTargetLowering : public TargetLowering {
 
   const char *getTargetNodeName(unsigned Opcode) const override;
 
-  // Check for if flag output operands has SRL/IPM Sequence.
-  bool canLowerSRL_IPM_Switch(SDValue Cond) const override;
+  // Returns the cost parameters for merging conditions over an srl/ipm/cc sequence.
+  CondMergingParams
+  getJumpConditionMergingParams(Instruction::BinaryOps Opc, const Value *Lhs,
+                                const Value *Rhs) const override;
 
   // Handle Lowering flag assembly outputs.
   SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag,
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccand_not.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccand_not.ll
index 766bb07eef209..29afcdbca4849 100644
--- a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccand_not.ll
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccand_not.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; Test Flag Output Operands with 14 combinations of CCMASK and optimizations.
 ; This test negate of flag_output_operand_ccand, e.g
-; CC != 0 && cc !- 1 && cc != 2 for AND for 3 three different functions, 
+; CC != 0 && cc != 1 && cc != 2 for AND for three different functions,
 ; including two test cases from heiko.
 
 ; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
@@ -555,6 +555,8 @@ if.end:                                           ; preds = %if.then, %entry
 }
 
 ; Test CC != 0 && CC != 3.
+; TODO: DAGCombiner is not able to optimize srl/ipm/cc sequence because of
+; switch table created by simplifyBranchOnICmpChain.
 define void @bar_03(){
 ; CHECK-LABEL: bar_03:
 ; CHECK:       # %bb.0: # %entry
@@ -563,8 +565,13 @@ define void @bar_03(){
 ; CHECK-NEXT:    alsi 0(%r1), -1
 ; CHECK-EMPTY:
 ; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ipm %r0
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB22_1: # %entry
+; CHECK-NEXT:    srl %r0, 28
+; CHECK-NEXT:    chi %r0, 3
 ; CHECK-NEXT:    jglh dummy at PLT
-; CHECK-NEXT:  .LBB22_1: # %if.end
+; CHECK-NEXT:  .LBB22_2: # %if.end
 ; CHECK-NEXT:    br %r14
 entry:
   %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_eq_noteq.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_eq_noteq.ll
index 3c071709b2e2a..cce7e1150aa95 100644
--- a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_eq_noteq.ll
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_eq_noteq.ll
@@ -2269,6 +2269,8 @@ if.end:                                           ; preds = %entry, %if.then
 }
 
 ; Test (cc == 0) && ((cc != 1) ^ (cc != 3))
+; TODO: DAGCombiner is not able to optimize srl/ipm/cc sequence because of
+; switch table created by simplifyBranchOnICmpChain.
 define i64 @bar1a_013_XOR_AND_XOR_a() {
 ; CHECK-LABEL: bar1a_013_XOR_AND_XOR_a:
 ; CHECK:       # %bb.0: # %entry
@@ -2277,8 +2279,13 @@ define i64 @bar1a_013_XOR_AND_XOR_a() {
 ; CHECK-NEXT:    alsi 0(%r1), -1
 ; CHECK-EMPTY:
 ; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ipm %r0
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB96_1: # %entry
+; CHECK-NEXT:    srl %r0, 28
+; CHECK-NEXT:    chi %r0, 3
 ; CHECK-NEXT:    jglh dummy at PLT
-; CHECK-NEXT:  .LBB96_1: # %if.end
+; CHECK-NEXT:  .LBB96_2: # %if.end
 ; CHECK-NEXT:    br %r14
 entry:
   %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
@@ -2315,6 +2322,8 @@ entry:
 }
 
 ; Test ((cc == 0) && (cc != 2)) ^ (cc != 3)
+; TODO: DAGCombiner is not able to optimize srl/ipm/cc sequence because of
+; switch table created by simplifyBranchOnICmpChain.
 define i64 @bar1a_023_XOR_AND() {
 ; CHECK-LABEL: bar1a_023_XOR_AND:
 ; CHECK:       # %bb.0: # %entry
@@ -2323,8 +2332,13 @@ define i64 @bar1a_023_XOR_AND() {
 ; CHECK-NEXT:    alsi 0(%r1), -1
 ; CHECK-EMPTY:
 ; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ipm %r0
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB98_1: # %entry
+; CHECK-NEXT:    srl %r0, 28
+; CHECK-NEXT:    chi %r0, 3
 ; CHECK-NEXT:    jglh dummy at PLT
-; CHECK-NEXT:  .LBB98_1: # %if.end
+; CHECK-NEXT:  .LBB98_2: # %if.end
 ; CHECK-NEXT:    br %r14
 entry:
   %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
@@ -2344,6 +2358,8 @@ if.end:                                           ; preds = %entry, %entry, %if.
 }
 
 ; Test ((cc == 0) && (cc != 2)) ^ (cc != 3)
+; TODO: DAGCombiner is not able to optimize srl/ipm/cc sequence because of
+; switch table created by simplifyBranchOnICmpChain.
 define i64 @bar1a_023_AND_XOR_a() {
 ; CHECK-LABEL: bar1a_023_AND_XOR_a:
 ; CHECK:       # %bb.0: # %entry
@@ -2352,8 +2368,13 @@ define i64 @bar1a_023_AND_XOR_a() {
 ; CHECK-NEXT:    alsi 0(%r1), -1
 ; CHECK-EMPTY:
 ; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ipm %r0
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB99_1: # %entry
+; CHECK-NEXT:    srl %r0, 28
+; CHECK-NEXT:    chi %r0, 3
 ; CHECK-NEXT:    jglh dummy at PLT
-; CHECK-NEXT:  .LBB99_1: # %if.end
+; CHECK-NEXT:  .LBB99_2: # %if.end
 ; CHECK-NEXT:    br %r14
 entry:
   %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
@@ -4969,6 +4990,8 @@ if.end:                                           ; preds = %entry, %if.then
 }
 
 ; Test (cc == 0) ^ ((cc == 1) || (cc != 3))
+; TODO: DAGCombiner is not able to optimize srl/ipm/cc sequence because of
+; switch table created by simplifyBranchOnICmpChain.
 define i64 @bar4a_013_XOR_OR_a() {
 ; CHECK-LABEL: bar4a_013_XOR_OR_a:
 ; CHECK:       # %bb.0: # %entry
@@ -4977,8 +5000,13 @@ define i64 @bar4a_013_XOR_OR_a() {
 ; CHECK-NEXT:    alsi 0(%r1), -1
 ; CHECK-EMPTY:
 ; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ipm %r0
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB206_1: # %entry
+; CHECK-NEXT:    srl %r0, 28
+; CHECK-NEXT:    chi %r0, 3
 ; CHECK-NEXT:    jglh dummy at PLT
-; CHECK-NEXT:  .LBB206_1: # %if.end
+; CHECK-NEXT:  .LBB206_2: # %if.end
 ; CHECK-NEXT:    br %r14
 entry:
   %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
@@ -5082,6 +5110,8 @@ if.end:                                           ; preds = %if.then, %entry
 }
 
 ; Test (cc == 0) ^ ((cc == 2) || (cc != 3))
+; TODO: DAGCombiner is not able to optimize srl/ipm/cc sequence because of
+; switch table created by simplifyBranchOnICmpChain.
 define i64 @bar4a_023_XOR_OR_a() {
 ; CHECK-LABEL: bar4a_023_XOR_OR_a:
 ; CHECK:       # %bb.0: # %entry
@@ -5090,8 +5120,13 @@ define i64 @bar4a_023_XOR_OR_a() {
 ; CHECK-NEXT:    alsi 0(%r1), -1
 ; CHECK-EMPTY:
 ; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ipm %r0
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB210_1: # %entry
+; CHECK-NEXT:    srl %r0, 28
+; CHECK-NEXT:    chi %r0, 3
 ; CHECK-NEXT:    jglh dummy at PLT
-; CHECK-NEXT:  .LBB210_1: # %if.end
+; CHECK-NEXT:  .LBB210_2: # %if.end
 ; CHECK-NEXT:    br %r14
 entry:
   %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_not.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_not.ll
index 84b8858afc8ad..629497ca6ceaf 100644
--- a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_not.ll
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_not.ll
@@ -1464,6 +1464,8 @@ if.end:                                           ; preds = %if.then, %entry
 }
 
 ; Test ((cc == 0) ^ (cc != 1)) && (cc != 3).
+; TODO: DAGCombiner is not able to optimize srl/ipm/cc sequence because of
+; switch table created by simplifyBranchOnICmpChain.
 define i64 @bar1_012_AND_XOR_b() {
 ; CHECK-LABEL: bar1_012_AND_XOR_b:
 ; CHECK:       # %bb.0: # %entry
@@ -1472,8 +1474,13 @@ define i64 @bar1_012_AND_XOR_b() {
 ; CHECK-NEXT:    alsi 0(%r1), -1
 ; CHECK-EMPTY:
 ; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ipm %r0
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB62_1: # %entry
+; CHECK-NEXT:    srl %r0, 28
+; CHECK-NEXT:    chi %r0, 3
 ; CHECK-NEXT:    jglh dummy at PLT
-; CHECK-NEXT:  .LBB62_1: # %if.end
+; CHECK-NEXT:  .LBB62_2: # %if.end
 ; CHECK-NEXT:    br %r14
 entry:
   %0 = tail call i32 asm "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccor.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccor.ll
index 9b51380ac4c09..d793949847820 100644
--- a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccor.ll
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccor.ll
@@ -823,6 +823,8 @@ if.end:                                           ; preds = %if.then, %entry
 }
 
 ; Test CC == 0 || CC == 3.
+; TODO: DAGCombiner is not able to optimize srl/ipm/cc sequence because of
+; switch table created by simplifyBranchOnICmpChain.
 define void @bar_03() {
 ; CHECK-LABEL: bar_03:
 ; CHECK:       # %bb.0: # %entry
@@ -831,9 +833,13 @@ define void @bar_03() {
 ; CHECK-NEXT:    alsi 0(%r1), -1
 ; CHECK-EMPTY:
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    jgnlh dummy at PLT
-; CHECK-NEXT:  .LBB33_1: # %if.end
-; CHECK-NEXT:    br %r14
+; CHECK-NEXT:    ipm %r0
+; CHECK-NEXT:    jgo dummy at PLT
+; CHECK-NEXT:  .LBB33_1: # %entry
+; CHECK-NEXT:    srl %r0, 28
+; CHECK-NEXT:    ciblh %r0, 0, 0(%r14)
+; CHECK-NEXT:  .LBB33_2: # %if.then
+; CHECK-NEXT:    jg dummy at PLT
 entry:
   %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
   %1 = icmp ult i32 %0, 4
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor.ll
index 0a2a508000430..63c4b506458ea 100644
--- a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor.ll
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; Test Flag Output Operands with 14 combinations of CCMASK and optimizations
-; for XOR for 3 three different functions, including two test cases from heiko.
+; for XOR for three different functions, including two test cases from heiko.
 
 ; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
 
@@ -559,6 +559,8 @@ if.end:                                           ; preds = %if.then, %entry
 }
 
 ; Test CC == 0 ^ CC == 3.
+; TODO: DAGCombiner is not able to optimize srl/ipm/cc sequence because of
+; switch table created by simplifyBranchOnICmpChain.
 define void @bar_03() {
 ; CHECK-LABEL: bar_03:
 ; CHECK:       # %bb.0: # %entry
@@ -567,9 +569,13 @@ define void @bar_03() {
 ; CHECK-NEXT:    alsi 0(%r1), -1
 ; CHECK-EMPTY:
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    jgnlh dummy at PLT
-; CHECK-NEXT:  .LBB22_1: # %if.end
-; CHECK-NEXT:    br %r14
+; CHECK-NEXT:    ipm %r0
+; CHECK-NEXT:    jgo dummy at PLT
+; CHECK-NEXT:  .LBB22_1: # %entry
+; CHECK-NEXT:    srl %r0, 28
+; CHECK-NEXT:    ciblh %r0, 0, 0(%r14)
+; CHECK-NEXT:  .LBB22_2: # %if.then
+; CHECK-NEXT:    jg dummy at PLT
 entry:
   %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
   %1 = icmp ult i32 %0, 4
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor_eq_noteq.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor_eq_noteq.ll
index bdbe5ff3ae924..de953265d82ee 100644
--- a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor_eq_noteq.ll
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor_eq_noteq.ll
@@ -746,6 +746,8 @@ if.end:                                           ; preds = %if.then, %entry
 }
 
 ; Test CC == 0 ^ CC != 3.
+; TODO: DAGCombiner is not able to optimize srl/ipm/cc sequence because of
+; switch table created by simplifyBranchOnICmpChain.
 define void @bar_03() {
 ; CHECK-LABEL: bar_03:
 ; CHECK:       # %bb.0: # %entry
@@ -754,8 +756,13 @@ define void @bar_03() {
 ; CHECK-NEXT:    alsi 0(%r1), -1
 ; CHECK-EMPTY:
 ; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ipm %r0
+; CHECK-NEXT:    ber %r14
+; CHECK-NEXT:  .LBB30_1: # %entry
+; CHECK-NEXT:    srl %r0, 28
+; CHECK-NEXT:    chi %r0, 3
 ; CHECK-NEXT:    jglh dummy at PLT
-; CHECK-NEXT:  .LBB30_1: # %if.end
+; CHECK-NEXT:  .LBB30_2: # %if.end
 ; CHECK-NEXT:    br %r14
 entry:
   %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor_not.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor_not.ll
index 47410d28f80e3..d0bea528cbf37 100644
--- a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor_not.ll
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccxor_not.ll
@@ -1,10 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; Test Flag Output Operands with 14 combinations of CCMASK and optimizations
-; for XOR for 3 three different functions, including two test cases from heiko.
+; for XOR for three different functions, including two test cases from heiko.
 ; This test checks NOT EQUAL (!=), e.g.  CC != 0 ^ CC != 1 ^ CC != 2.
 
 ; RUN: llc < %s -verify-machineinstrs -mtriple=s390x-linux-gnu -O2 | FileCheck %s
-
 ; Test CC != 0 ^ CC != 1.
 define signext range(i32 0, 43) i32 @foo_01(i32 noundef signext %x) {
 ; CHECK-LABEL: foo_01:
@@ -552,6 +551,8 @@ if.end:                                           ; preds = %if.then, %entry
 }
 
 ; Test CC != 0 ^ CC != 3.
+; TODO: DAGCombiner is not able to optimize srl/ipm/cc sequence because of
+; switch table created by simplifyBranchOnICmpChain.
 define void @bar_03() {
 ; CHECK-LABEL: bar_03:
 ; CHECK:       # %bb.0: # %entry
@@ -560,9 +561,13 @@ define void @bar_03() {
 ; CHECK-NEXT:    alsi 0(%r1), -1
 ; CHECK-EMPTY:
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    jgnlh dummy at PLT
-; CHECK-NEXT:  .LBB22_1: # %if.end
-; CHECK-NEXT:    br %r14
+; CHECK-NEXT:    ipm %r0
+; CHECK-NEXT:    jgo dummy at PLT
+; CHECK-NEXT:  .LBB22_1: # %entry
+; CHECK-NEXT:    srl %r0, 28
+; CHECK-NEXT:    ciblh %r0, 0, 0(%r14)
+; CHECK-NEXT:  .LBB22_2: # %if.then
+; CHECK-NEXT:    jg dummy at PLT
 entry:
   %0 = tail call i32 asm sideeffect "       alsi    $1,-1\0A", "={@cc},=*QS,*QS,~{memory}"(ptr nonnull elementtype(i32) @a, ptr nonnull elementtype(i32) @a) #3
   %1 = icmp ult i32 %0, 4

>From d787c8bb3530ca522c60c50db4e37915b801797e Mon Sep 17 00:00:00 2001
From: anoopkg6 <anoopkg6 at github.com>
Date: Tue, 22 Apr 2025 00:03:16 +0200
Subject: [PATCH 08/12] Incorporated changes for code review feedback.

---
 clang/include/clang/Basic/TargetInfo.h        |  14 +-
 clang/lib/Basic/Targets/SystemZ.cpp           |   1 +
 clang/lib/Basic/Targets/SystemZ.h             |  10 +-
 clang/lib/CodeGen/CGStmt.cpp                  |  26 +-
 .../SelectionDAG/SelectionDAGBuilder.cpp      |   7 +-
 .../Target/SystemZ/SystemZISelLowering.cpp    | 345 ++++++++----------
 llvm/lib/Target/SystemZ/SystemZISelLowering.h |   5 +-
 llvm/test/CodeGen/SystemZ/htm-intrinsics.ll   |   4 +-
 8 files changed, 190 insertions(+), 222 deletions(-)

diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h
index 15d7bd50aca25..60497e273d31c 100644
--- a/clang/include/clang/Basic/TargetInfo.h
+++ b/clang/include/clang/Basic/TargetInfo.h
@@ -1114,12 +1114,11 @@ class TargetInfo : public TransferrableTargetInfo,
 
     std::string ConstraintStr;  // constraint: "=rm"
     std::string Name;           // Operand name: [foo] with no []'s.
-    unsigned FlagOutputCCUpperBound;
 
   public:
     ConstraintInfo(StringRef ConstraintStr, StringRef Name)
         : Flags(0), TiedOperand(-1), ConstraintStr(ConstraintStr.str()),
-          Name(Name.str()), FlagOutputCCUpperBound(0) {
+          Name(Name.str()) {
       ImmRange.Min = ImmRange.Max = 0;
       ImmRange.isConstrained = false;
     }
@@ -1191,13 +1190,13 @@ class TargetInfo : public TransferrableTargetInfo,
       // Don't copy Name or constraint string.
     }
 
-    // CC range can be set by target. SystemZ sets it to 4. It is 2 by default.
+    // The CC range can be set by targets that support flag output operands.
     void setFlagOutputCCUpperBound(unsigned CCBound) {
-      FlagOutputCCUpperBound = CCBound;
-    }
-    unsigned getFlagOutputCCUpperBound() const {
-      return FlagOutputCCUpperBound;
+      // Using ImmRange.Max to store CC upper bound. Interval [0, CCBound).
+      ImmRange.Max = CCBound;
+      ImmRange.isConstrained = true;
     }
+    unsigned getFlagOutputCCUpperBound() const { return ImmRange.Max; }
   };
 
   /// Validate register name used for global register variables.
@@ -1238,7 +1237,6 @@ class TargetInfo : public TransferrableTargetInfo,
                              std::string &/*SuggestedModifier*/) const {
     return true;
   }
-
   virtual bool
   validateAsmConstraint(const char *&Name,
                         TargetInfo::ConstraintInfo &info) const = 0;
diff --git a/clang/lib/Basic/Targets/SystemZ.cpp b/clang/lib/Basic/Targets/SystemZ.cpp
index 8f5bb70925d60..18cb5ea0444a2 100644
--- a/clang/lib/Basic/Targets/SystemZ.cpp
+++ b/clang/lib/Basic/Targets/SystemZ.cpp
@@ -103,6 +103,7 @@ bool SystemZTargetInfo::validateAsmConstraint(
     if (StringRef(Name) == "@cc") {
       Name += 2;
       Info.setAllowsRegister();
+      // SystemZ has a 2-bit CC, hence the interval [0, 4).
       Info.setFlagOutputCCUpperBound(4);
       return true;
     }
diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index 26bdf65ebc2d5..fc9ee2cd4b693 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -119,11 +119,13 @@ class LLVM_LIBRARY_VISIBILITY SystemZTargetInfo : public TargetInfo {
                              TargetInfo::ConstraintInfo &info) const override;
 
   std::string convertConstraint(const char *&Constraint) const override {
-    if (llvm::StringRef(Constraint) == "@cc") {
-      Constraint += 2;
-      return std::string("{@cc}");
-    }
     switch (Constraint[0]) {
+    case '@': // Flag output operand.
+      if (llvm::StringRef(Constraint) == "@cc") {
+        Constraint += 2;
+        return std::string("{@cc}");
+      }
+      break;
     case 'p': // Keep 'p' constraint.
       return std::string("p");
     case 'Z':
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index f7f322c163646..5f56297ae5746 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -2601,7 +2601,7 @@ EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
               const llvm::ArrayRef<LValue> ResultRegDests,
               const llvm::ArrayRef<QualType> ResultRegQualTys,
               const llvm::BitVector &ResultTypeRequiresCast,
-              const std::vector<unsigned> &ResultRegIsFlagReg) {
+              const std::vector<unsigned> &ResultFlagRegCCBound) {
   CGBuilderTy &Builder = CGF.Builder;
   CodeGenModule &CGM = CGF.CGM;
   llvm::LLVMContext &CTX = CGF.getLLVMContext();
@@ -2612,23 +2612,19 @@ EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
   // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
   // in which case its size may grow.
   assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
-  assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
+  assert(ResultFlagRegCCBound.size() <= ResultRegDests.size());
 
   for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
     llvm::Value *Tmp = RegResults[i];
     llvm::Type *TruncTy = ResultTruncRegTypes[i];
 
-    if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
+    if ((i < ResultFlagRegCCBound.size()) && ResultFlagRegCCBound[i]) {
       // Target must guarantee the Value `Tmp` here is lowered to a boolean
       // value.
-      // Lowering 'Tmp' as - 'icmp ult %Tmp , CCUpperBound'. On some targets
-      // CCUpperBound is not binary. CCUpperBound is 4 for SystemZ,
-      // interval [0, 4). With this range known, llvm.assume intrinsic guides
-      // optimizer to generate more optimized IR in most of the cases as
-      // observed for select_cc on SystemZ unit tests for flag output operands.
-      // For some cases for br_cc, generated IR was weird. e.g. switch table
-      // for simple simple comparison terms for br_cc.
-      unsigned CCUpperBound = ResultRegIsFlagReg[i];
+      // Lower 'Tmp' as 'icmp ult %Tmp, CCUpperBound'.
+      unsigned CCUpperBound = ResultFlagRegCCBound[i];
+      assert((CCUpperBound == 2 || CCUpperBound == 4) &&
+             "CC upper bound out of range!");
       llvm::Constant *CCUpperBoundConst =
           llvm::ConstantInt::get(Tmp->getType(), CCUpperBound);
       llvm::Value *IsBooleanValue =
@@ -2759,7 +2755,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
   std::vector<llvm::Type *> ArgElemTypes;
   std::vector<llvm::Value*> Args;
   llvm::BitVector ResultTypeRequiresCast;
-  std::vector<unsigned> ResultRegIsFlagReg;
+  std::vector<unsigned> ResultFlagRegCCBound;
 
   // Keep track of inout constraints.
   std::string InOutConstraints;
@@ -2817,7 +2813,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
       ResultRegQualTys.push_back(QTy);
       ResultRegDests.push_back(Dest);
 
-      ResultRegIsFlagReg.push_back(Info.getFlagOutputCCUpperBound());
+      ResultFlagRegCCBound.push_back(Info.getFlagOutputCCUpperBound());
 
       llvm::Type *Ty = ConvertTypeForMem(QTy);
       const bool RequiresCast = Info.allowsRegister() &&
@@ -3164,7 +3160,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
 
   EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
                 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
-                ResultRegIsFlagReg);
+                ResultFlagRegCCBound);
 
   // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
   // different insertion point; one for each indirect destination and with
@@ -3175,7 +3171,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
       Builder.SetInsertPoint(Succ, --(Succ->end()));
       EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
                     ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
-                    ResultTypeRequiresCast, ResultRegIsFlagReg);
+                    ResultTypeRequiresCast, ResultFlagRegCCBound);
     }
   }
 }
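
The flag-output store path above boils down to an "icmp ult" against the
target-provided CC upper bound feeding llvm.assume, as the removed comment
describes. A self-contained sketch of that shape, assuming a hypothetical helper
name and an already positioned IRBuilder (not part of the patch):

    #include "llvm/IR/IRBuilder.h"

    // CC is the raw flag-output value; CCUpperBound comes from
    // getFlagOutputCCUpperBound() (4 on SystemZ, giving the interval [0, 4)).
    static void emitCCRangeAssumption(llvm::IRBuilder<> &Builder,
                                      llvm::Value *CC, unsigned CCUpperBound) {
      llvm::Value *InRange = Builder.CreateICmpULT(
          CC, llvm::ConstantInt::get(CC->getType(), CCUpperBound), "cc.in.range");
      Builder.CreateAssumption(InRange);
    }
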
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 8bfe49020ee7b..b734b5babfbd1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -12117,13 +12117,12 @@ void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
         SDValue CondLHS = getValue(Cond);
         EVT VT = CondLHS.getValueType();
         SDLoc DL = getCurSDLoc();
-        SDValue Cond;
 
         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
                                  DAG.getConstant(CommonBit, DL, VT));
-        Cond = DAG.getSetCC(DL, MVT::i1, Or,
-                            DAG.getConstant(BigValue | SmallValue, DL, VT),
-                            ISD::SETEQ);
+        SDValue Cond = DAG.getSetCC(
+            DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
+            ISD::SETEQ);
 
         // Update successor info.
         // Both Small and Big will jump to Small.BB, so we sum up the
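
The restored two-destination merge relies on the case values differing in a
single bit: "(X | CommonBit) == (Big | Small)" holds exactly for X == Small or
X == Big. A standalone check of that identity with made-up case values (4 and 6),
purely for illustration:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t Small = 4, Big = 6;      // differ only in bit 1
      const uint32_t CommonBit = Big ^ Small; // 2, a power of two
      for (uint32_t X = 0; X < 8; ++X) {
        bool Merged = (X | CommonBit) == (Big | Small);
        assert(Merged == (X == Small || X == Big));
      }
      return 0;
    }
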
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index f656401f45876..749cd4cb4ef2c 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -2797,21 +2797,6 @@ static unsigned CCMaskForCondCode(ISD::CondCode CC) {
 #undef CONV
 }
 
-static unsigned CCMaskForSystemZCCVal(unsigned CC) {
-  switch (CC) {
-  default:
-    llvm_unreachable("invalid integer condition!");
-  case 0:
-    return SystemZ::CCMASK_CMP_EQ;
-  case 1:
-    return SystemZ::CCMASK_CMP_LT;
-  case 2:
-    return SystemZ::CCMASK_CMP_GT;
-  case 3:
-    return SystemZ::CCMASK_CMP_UO;
-  }
-}
-
 // If C can be converted to a comparison against zero, adjust the operands
 // as necessary.
 static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
@@ -8078,27 +8063,29 @@ SDValue SystemZTargetLowering::combineBSWAP(
 }
 
 // Combine IPM sequence for flag output operands.
-static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
-  // Convert CCVal to CCMask and update it along with  CCValid.
-  const auto convertCCValToCCMask = [&CCMask, &CCValid](int CCVal) {
+static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
+  // CC mask for an ICmp testing CC == 0, 1, 2 or 3.
+  const auto CCMaskForICmpEQCCVal = [](unsigned CC) {
+    assert(CC < 4 && "CC out of range");
+    return 1 << (3 - CC);
+  };
+  // Convert CCVal to CCMask and update it.
+  const auto convertCCValToCCMask = [&](int CCVal) {
     bool Invert = false;
     if (CCMask == SystemZ::CCMASK_CMP_NE)
       Invert = !Invert;
     if (CCMask == SystemZ::CCMASK_CMP_EQ || CCMask == SystemZ::CCMASK_CMP_NE) {
-      CCMask = CCMaskForSystemZCCVal(CCVal);
+      CCMask = CCMaskForICmpEQCCVal(CCVal);
       if (Invert)
         CCMask ^= SystemZ::CCMASK_ANY;
-      CCValid = SystemZ::CCMASK_ANY;
       return true;
     } else if (CCMask == SystemZ::CCMASK_CMP_LT) {
       // CC in range [0, CCVal).
       CCMask = ((~0U << (4 - CCVal)) & SystemZ::CCMASK_ANY);
-      CCValid = SystemZ::CCMASK_ANY;
       return true;
     } else if (CCMask == SystemZ::CCMASK_CMP_GT) {
       // CC in range (CCVal, 3].
       CCMask = (~(~0U << (3 - CCVal))) & SystemZ::CCMASK_ANY;
-      CCValid = SystemZ::CCMASK_ANY;
       return true;
     }
     return false;
@@ -8113,33 +8100,34 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     auto *IPM = N->getOperand(0).getNode();
     if (!IPM || IPM->getOpcode() != SystemZISD::IPM)
       return false;
-    auto *IPMOp0 = IPM->getOperand(0).getNode();
-    if (!IPMOp0 || IPMOp0->getNumOperands() < 2)
-      return false;
-    auto *RN = dyn_cast<RegisterSDNode>(IPMOp0->getOperand(1));
-    // Check if operand 1 is SystemZ::CC. Also, it avoids srl/ipm/tbegin and
-    // srl/ipm/tend kind of sequences.
-    if (!RN || !RN->getReg().isPhysical() || RN->getReg() != SystemZ::CC)
-      return false;
-    // Return the updated CCReg link.
     CCReg = IPM->getOperand(0);
     return true;
   };
-  // Check if N has SystemZ::CC operand.
-  const auto isCCOperand = [](SDNode *N) {
-    if (!N || N->getNumOperands() < 2)
+  // Check whether the select_cc has already been combined and uses the same
+  // ipm/cc as CCOp; return the evaluated mask CCMaskVal. (SELECT_CCMASK (CC)).
+  const auto isSameCCIPMOp = [](SDValue &CCOp, SDNode *N, int &CCValidVal,
+                                int &CCMaskVal) {
+    if (!N || N->getOpcode() != SystemZISD::SELECT_CCMASK)
       return false;
-    auto *RN = dyn_cast<RegisterSDNode>(N->getOperand(1));
-    if (!RN || !RN->getReg().isPhysical() || RN->getReg() != SystemZ::CC)
+    auto *CCValidNode = dyn_cast<ConstantSDNode>(N->getOperand(2));
+    auto *CCMaskNode = dyn_cast<ConstantSDNode>(N->getOperand(3));
+    if (!CCValidNode || !CCMaskNode)
       return false;
-    return true;
-  };
 
+    CCValidVal = CCValidNode->getZExtValue();
+    // Already been combined.
+    if (CCValidVal != SystemZ::CCMASK_ANY)
+      return false;
+    CCMaskVal = CCMaskNode->getZExtValue();
+    SDValue CCRegOp = N->getOperand(4);
+    auto *CCOpNode = CCOp.getNode(), *CCRegOpNode = CCRegOp.getNode();
+    // Uses the same ipm/cc.
+    return CCOpNode && CCRegOpNode && CCOpNode == CCRegOpNode;
+  };
   auto *CCNode = CCReg.getNode();
   if (!CCNode)
     return false;
 
-  int RestoreCCValid = CCValid;
   // Optimize (TM (IPM (CC)))
   if (CCNode->getOpcode() == SystemZISD::TM) {
     bool Invert = false;
@@ -8166,8 +8154,8 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
         return false;
       // OP1. (SELECT_CCMASK (ICMP (SRL (IPM (CC))))).
       // OP2. (SRL (IPM (CC))).
-      if (XOROp1->getOpcode() == SystemZISD::SELECT_CCMASK &&
-          isSRL_IPM_CCSequence(XOROp2)) {
+      if (XOROp1->getOpcode() == SystemZISD::SELECT_CCMASK /*&&
+          isSRL_IPM_CCSequence(XOROp2)*/) {
         auto *CCValid1 = dyn_cast<ConstantSDNode>(XOROp1->getOperand(2));
         auto *CCMask1 = dyn_cast<ConstantSDNode>(XOROp1->getOperand(3));
         SDValue XORReg = XOROp1->getOperand(4);
@@ -8175,7 +8163,7 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
           return false;
         int CCValidVal = CCValid1->getZExtValue();
         int CCMaskVal = CCMask1->getZExtValue();
-        if (combineSRL_IPM_CCMask(XORReg, CCValidVal, CCMaskVal)) {
+        if (combineCCMask(XORReg, CCValidVal, CCMaskVal)) {
           // CC == 0 || CC == 2 for bit 28 Test Under Mask.
           CCMask = SystemZ::CCMASK_CMP_GE;
           CCMask ^= CCMaskVal;
@@ -8202,10 +8190,9 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
       CCValid = SystemZ::CCMASK_ANY;
       return true;
     }
-    CCValid = RestoreCCValid;
     return false;
   }
-  // (SELECT_CCMASK (CC)) or (SELECT_CCMASK (ICMP (SRL (IPM (CC)))))
+  // (SELECT_CCMASK (ICMP (SRL (IPM (CC)))))
   if (CCNode->getOpcode() == SystemZISD::SELECT_CCMASK) {
     auto *CCValidNode = dyn_cast<ConstantSDNode>(CCNode->getOperand(2));
     auto *CCMaskNode = dyn_cast<ConstantSDNode>(CCNode->getOperand(3));
@@ -8215,14 +8202,12 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     int CCValidVal = CCValidNode->getZExtValue();
     int CCMaskVal = CCMaskNode->getZExtValue();
     SDValue CCRegOp = CCNode->getOperand(4);
-    if (combineSRL_IPM_CCMask(CCRegOp, CCValidVal, CCMaskVal) ||
-        isCCOperand(CCRegOp.getNode())) {
+    if (combineCCMask(CCRegOp, CCValidVal, CCMaskVal)) {
       CCMask = CCMaskVal;
       CCValid = SystemZ::CCMASK_ANY;
       CCReg = CCRegOp;
       return true;
     }
-    CCValid = RestoreCCValid;
     return false;
   }
 
@@ -8250,15 +8235,14 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
       int CCMaskVal2 = CCMask2->getZExtValue();
       SDValue CCReg1 = XOROp1->getOperand(4);
       SDValue CCReg2 = XOROp2->getOperand(4);
-      if (!combineSRL_IPM_CCMask(CCReg1, CCValidVal1, CCMaskVal1) ||
-          !combineSRL_IPM_CCMask(CCReg2, CCValidVal2, CCMaskVal2))
+      if (!combineCCMask(CCReg1, CCValidVal1, CCMaskVal1) ||
+          !combineCCMask(CCReg2, CCValidVal2, CCMaskVal2))
         return false;
       CCMask = CCMaskVal1 ^ CCMaskVal2;
       CCReg = CCReg1;
       CCValid = SystemZ::CCMASK_ANY;
       return true;
     }
-    CCValid = RestoreCCValid;
     return false;
   }
 
@@ -8276,37 +8260,74 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
   if (!RHS) {
     SDValue CmpOp1 = CCNode->getOperand(0);
     SDValue CmpOp2 = CCNode->getOperand(1);
-    int CCValid1 = CCValid, CCValid2 = CCValid;
-    int CCMask1 = CCMask, CCMask2 = CCMask;
-    bool IsOp1 = combineSRL_IPM_CCMask(CmpOp1, CCValid1, CCMask1);
-    bool IsOp2 = combineSRL_IPM_CCMask(CmpOp2, CCValid2, CCMask2);
-    if (IsOp1 && IsOp2) {
-      CCMask = CCMask1 ^ CCMask2;
-      CCReg = CmpOp1;
-      CCValid = SystemZ::CCMASK_ANY;
-      return true;
+    auto *CmpNode1 = CmpOp1.getNode(), *CmpNode2 = CmpOp2.getNode();
+    if (!CmpNode1 || !CmpNode2)
+      return false;
+    if (CmpNode1->getOpcode() == SystemZISD::SELECT_CCMASK ||
+        CmpNode2->getOpcode() == SystemZISD::SELECT_CCMASK) {
+      SDValue CmpOp =
+          CmpNode1->getOpcode() == SystemZISD::SELECT_CCMASK ? CmpOp2 : CmpOp1;
+      SDNode *SelectCC = CmpNode1->getOpcode() == SystemZISD::SELECT_CCMASK
+                             ? CmpNode1
+                             : CmpNode2;
+      int CmpCCValid = CCValid, SelectCCValid = CCValid;
+      int CmpCCMask = CCMask, SelectCCMask = CCMask;
+      bool IsOp1 = combineCCMask(CmpOp, CmpCCValid, CmpCCMask);
+      bool IsOp2 = isSameCCIPMOp(CmpOp, SelectCC, SelectCCValid, SelectCCMask);
+      if (IsOp1 && IsOp2) {
+        CCMask = CmpCCMask ^ SelectCCMask;
+        CCReg = CmpOp;
+        CCValid = SystemZ::CCMASK_ANY;
+        return true;
+      }
     }
-    CCValid = RestoreCCValid;
     return false;
   }
   int CmpVal = RHS->getZExtValue();
   // (BR_CC (ICMP (SELECT_CCMASK (CC))))
   if (LHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
     int CCVal = RHS->getZExtValue();
-    int Mask = CCMaskForSystemZCCVal(CCVal);
+    int Mask = CCMaskForICmpEQCCVal(CCVal);
     bool Invert = false;
     if (CCMask == SystemZ::CCMASK_CMP_NE)
       Invert = !Invert;
     SDValue NewCCReg = CCNode->getOperand(0);
-    if (combineSRL_IPM_CCMask(NewCCReg, CCValid, CCMask)) {
+    if (combineCCMask(NewCCReg, CCValid, CCMask)) {
       CCMask |= Mask;
       if (Invert)
         CCMask ^= SystemZ::CCMASK_ANY;
       CCReg = NewCCReg;
       CCValid = SystemZ::CCMASK_ANY;
       return true;
+    } else if (CCMask == SystemZ::CCMASK_CMP_NE ||
+               CCMask != SystemZ::CCMASK_CMP_EQ) {
+      // Original combineCCMask.
+      // Verify that the ICMP compares against one of select values.
+      auto *TrueVal = dyn_cast<ConstantSDNode>(LHS->getOperand(0));
+      if (!TrueVal)
+        return false;
+      auto *FalseVal = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
+      if (!FalseVal)
+        return false;
+      if (RHS->getAPIntValue() == FalseVal->getAPIntValue())
+        Invert = !Invert;
+      else if (RHS->getAPIntValue() != TrueVal->getAPIntValue())
+        return false;
+
+      // Compute the effective CC mask for the new branch or select.
+      auto *NewCCValid = dyn_cast<ConstantSDNode>(LHS->getOperand(2));
+      auto *NewCCMask = dyn_cast<ConstantSDNode>(LHS->getOperand(3));
+      if (!NewCCValid || !NewCCMask)
+        return false;
+      CCValid = NewCCValid->getZExtValue();
+      CCMask = NewCCMask->getZExtValue();
+      if (Invert)
+        CCMask ^= CCValid;
+
+      // Return the updated CCReg link.
+      CCReg = LHS->getOperand(4);
+      return true;
     }
-    CCValid = RestoreCCValid;
     return false;
   }
   // (BR_CC (ICMP OR ((SRL (IPM (CC))) (SELECT_CCMASK (CC)))))
@@ -8316,12 +8337,12 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
       Invert = !Invert;
     SDValue OrOp1 = LHS->getOperand(0);
     SDValue OrOp2 = LHS->getOperand(1);
+    int CCValid1 = CCValid, CCValid2 = CCValid;
     int NewCCMask1 = CCMask, NewCCMask2 = CCMask, NewCCMask = CCMask;
     if (!isa<ConstantSDNode>(OrOp1) && !isa<ConstantSDNode>(OrOp2)) {
-      bool IsOp1 = combineSRL_IPM_CCMask(OrOp1, CCValid, NewCCMask1);
-      bool IsOp2 = combineSRL_IPM_CCMask(OrOp2, CCValid, NewCCMask2);
+      bool IsOp1 = combineCCMask(OrOp1, CCValid1, NewCCMask1);
+      bool IsOp2 = combineCCMask(OrOp2, CCValid2, NewCCMask2);
       if (!IsOp1 && !IsOp2) {
-        CCValid = RestoreCCValid;
         return false;
       }
       if (IsOp1 && IsOp2) {
@@ -8344,11 +8365,12 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
         // setullt unsigned(-2), mask = 0x1100
         // setugt unsigned(-4), mask = 0x0011
         CmpVal &= 0x3;
-        if (convertCCValToCCMask(CmpVal))
+        if (convertCCValToCCMask(CmpVal)) {
+          CCValid = SystemZ::CCMASK_ANY;
           return true;
+        }
       }
     }
-    CCValid = RestoreCCValid;
     return false;
   }
   // (BR_CC (ICMP AND ((SRL (IPM (CC))) (SELECT_CCMASK (CC)))))
@@ -8358,16 +8380,13 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
       Invert = !Invert;
     SDValue AndOp1 = LHS->getOperand(0);
     SDValue AndOp2 = LHS->getOperand(1);
-    int NewCCMask1 = CCMask;
-    int NewCCMask2 = CCMask;
-    int NewCCMask;
+    int NewCCMask1 = CCMask, NewCCMask2 = CCMask, NewCCMask;
+    int CCValid1 = CCValid, CCValid2 = CCValid;
     if (!isa<ConstantSDNode>(AndOp1) && !isa<ConstantSDNode>(AndOp2)) {
-      bool IsOp1 = combineSRL_IPM_CCMask(AndOp1, CCValid, NewCCMask1);
-      bool IsOp2 = combineSRL_IPM_CCMask(AndOp2, CCValid, NewCCMask2);
-      if (!IsOp1 && !IsOp2) {
-        CCValid = RestoreCCValid;
+      bool IsOp1 = combineCCMask(AndOp1, CCValid1, NewCCMask1);
+      bool IsOp2 = combineCCMask(AndOp2, CCValid2, NewCCMask2);
+      if (!IsOp1 && !IsOp2)
         return false;
-      }
       if (IsOp1 && IsOp2) {
         NewCCMask = NewCCMask1 & NewCCMask2;
         bool IsEqualCmpVal = NewCCMask == CmpVal;
@@ -8383,10 +8402,8 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
           NewCCMask = NewCCMask1;
         else if (isSRL_IPM_CCSequence(AndOp2.getNode()) && IsOp2)
           NewCCMask = NewCCMask2;
-        else {
-          CCValid = RestoreCCValid;
+        else
           return false;
-        }
         // Bit 29 set => CC == 2 || CC == 3.
         if ((NewCCMask & 0x3) == 2)
           NewCCMask = SystemZ::CCMASK_2 | SystemZ::CCMASK_3;
@@ -8394,7 +8411,7 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
         else if ((NewCCMask & 0x3) == 1)
           NewCCMask = SystemZ::CCMASK_1 | SystemZ::CCMASK_3;
         int CCVal = RHS->getZExtValue();
-        int Mask = CCMaskForSystemZCCVal(CCVal);
+        int Mask = CCMaskForICmpEQCCVal(CCVal);
         CCMask = Mask | NewCCMask;
         if (Invert ^ CmpVal)
           CCMask ^= SystemZ::CCMASK_ANY;
@@ -8402,15 +8419,15 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
         return true;
       }
     }
-    CCValid = RestoreCCValid;
     return false;
   }
   // Optimize the case where LHS is (ICMP (SRL (IPM))).
   if (isSRL_IPM_CCSequence(LHS)) {
     unsigned CCVal = RHS->getZExtValue();
-    if (convertCCValToCCMask(CCVal))
+    if (convertCCValToCCMask(CCVal)) {
+      CCValid = SystemZ::CCMASK_ANY;
       return true;
-    CCValid = RestoreCCValid;
+    }
     return false;
   }
   if (LHS->getOpcode() == ISD::ADD) {
@@ -8438,10 +8455,10 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
         CCMask ^= SystemZ::CCMASK_CMP_EQ;
         if (Invert)
           CCMask ^= SystemZ::CCMASK_ANY;
+        CCValid = SystemZ::CCMASK_ANY;
         return true;
       }
     }
-    CCValid = RestoreCCValid;
     return false;
   }
 
@@ -8453,7 +8470,7 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     if (CCMask == SystemZ::CCMASK_CMP_NE)
       Invert = !Invert;
     // If both the operands are select_cc.
-    if (combineSRL_IPM_CCMask(XORReg, CCValid, CCMask)) {
+    if (combineCCMask(XORReg, CCValid, CCMask)) {
       CCReg = XORReg;
       CCValid = SystemZ::CCMASK_ANY;
       return true;
@@ -8477,8 +8494,8 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
       SDValue XORReg1 = XOROp->getOperand(4);
       SDValue XORReg2 = LHS->getOperand(1);
       int CCMaskVal1 = CCMaskVal, CCMaskVal2 = CCMaskVal;
-      if (combineSRL_IPM_CCMask(XORReg1, CCValidVal, CCMaskVal1) &&
-          combineSRL_IPM_CCMask(XORReg2, CCValidVal, CCMaskVal2)) {
+      if (combineCCMask(XORReg1, CCValidVal, CCMaskVal1) &&
+          combineCCMask(XORReg2, CCValidVal, CCMaskVal2)) {
         CCMask = CCMaskVal1 ^ CCMaskVal2;
         CCReg = XORReg1;
         CCValid = SystemZ::CCMASK_ANY;
@@ -8486,80 +8503,14 @@ static bool combineSRL_IPM_CCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
       }
     }
   }
-  CCValid = RestoreCCValid;
-  return false;
-}
-
-static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
-  // combineSRL_IPM_CCMask tries to combine srl/ipm/cc sequence.
-  // This sequence here seems to be only for flag output operand.
-  // IPM operand has physical operand SystemZ::CC and CCValid is 15.
-  if (combineSRL_IPM_CCMask(CCReg, CCValid, CCMask))
-    return true;
-
-  // Code for SELECT_CCMASK does not seem to have ipm sequence.
-  // There is one case with sra/ipm that does not have SystemZ::CC as an
-  // operand. Test cases for sra/ipm are bcmp.ll, memcmp-01.ll and
-  // strcmp-01.ll. These tests have sra/sll/ipm/clc sequence.
-
-  // We have a SELECT_CCMASK or BR_CCMASK comparing the condition code
-  // set by the CCReg instruction using the CCValid / CCMask masks,
-  // If the CCReg instruction is itself a ICMP testing the condition
-  // code set by some other instruction, see whether we can directly
-  // use that condition code.
-
-  // Verify that we have an ICMP against some constant.
-  if (CCValid != SystemZ::CCMASK_ICMP)
-    return false;
-  auto *ICmp = CCReg.getNode();
-  if (ICmp->getOpcode() != SystemZISD::ICMP)
-    return false;
-  auto *CompareLHS = ICmp->getOperand(0).getNode();
-  auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
-  if (!CompareRHS)
-    return false;
-
-  // Optimize the case where CompareLHS is a SELECT_CCMASK.
-  if (CompareLHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
-    // Verify that we have an appropriate mask for a EQ or NE comparison.
-    bool Invert = false;
-    if (CCMask == SystemZ::CCMASK_CMP_NE)
-      Invert = !Invert;
-    else if (CCMask != SystemZ::CCMASK_CMP_EQ)
-      return false;
-
-    // Verify that the ICMP compares against one of select values.
-    auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
-    if (!TrueVal)
-      return false;
-    auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
-    if (!FalseVal)
-      return false;
-    if (CompareRHS->getAPIntValue() == FalseVal->getAPIntValue())
-      Invert = !Invert;
-    else if (CompareRHS->getAPIntValue() != TrueVal->getAPIntValue())
-      return false;
 
-    // Compute the effective CC mask for the new branch or select.
-    auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
-    auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
-    if (!NewCCValid || !NewCCMask)
-      return false;
-    CCValid = NewCCValid->getZExtValue();
-    CCMask = NewCCMask->getZExtValue();
-    if (Invert)
-      CCMask ^= CCValid;
-
-    // Return the updated CCReg link.
-    CCReg = CompareLHS->getOperand(4);
-    return true;
-  }
-  // Optimize the case where CompareRHS is (SRA (SHL (IPM))).
-  if (CompareLHS->getOpcode() == ISD::SRA) {
-    auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
+  // Original combineCCMask.
+  // Optimize the case where LHS is (SRA (SHL (IPM))).
+  if (LHS->getOpcode() == ISD::SRA) {
+    auto *SRACount = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
     if (!SRACount || SRACount->getZExtValue() != 30)
       return false;
-    auto *SHL = CompareLHS->getOperand(0).getNode();
+    auto *SHL = LHS->getOperand(0).getNode();
     if (SHL->getOpcode() != ISD::SHL)
       return false;
     auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
@@ -8570,10 +8521,10 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
       return false;
 
     // Avoid introducing CC spills (because SRA would clobber CC).
-    if (!CompareLHS->hasOneUse())
+    if (!LHS->hasOneUse())
       return false;
     // Verify that the ICMP compares against zero.
-    if (CompareRHS->getZExtValue() != 0)
+    if (RHS->getZExtValue() != 0)
       return false;
 
     // Compute the effective CC mask for the new branch or select.
@@ -8586,18 +8537,33 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
   return false;
 }
 
-std::optional<SDValue>
+// Combine and compute the CCMask of two select_cc nodes for a flag output
+// operand, where one of the TrueVal/FalseVal operands is not constant and the
+// subtree select_cc has already been combined/evaluated.
+// This function could be integrated into combineCCMask with the following
+// changes to the design:
+// 1. combineCCMask would need N and DCI as arguments, since operands 0 and 1
+// (TrueVal/FalseVal) are required to check whether the select_cc is inverted
+// with respect to the subtree select_cc, which decides whether CCMask must be
+// inverted or Op0 and Op1 swapped while creating the new SDValue.
+// 2. The code would have to run at the beginning of combineCCMask so that
+// there is an early exit when this transformation applies, without interfering
+// with combineCCMask combining a single select_cc sequence.
+// 3. This function calls combineCCMask to combine the outer select_cc, which
+// may be an issue if combineCCMask is converted into an iterative form.
+// 4. combineCCMask is already a long function.
+SDValue
 SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
-  // Check if N has SystemZ::CC operand.
-  const auto isCCOperand = [](SDNode *N) {
-    if (!N || N->getNumOperands() < 2)
+  // Check if CCOp1 and CCOp2 refer to the same CC condition node.
+  const auto isSameCCIPMOp = [](SDValue &CCOp1, SDValue &CCOp2,
+                                int &CCValidVal) {
+    // Already combined/evaluated sequence.
+    if (CCValidVal != SystemZ::CCMASK_ANY)
       return false;
-    auto *RN = dyn_cast<RegisterSDNode>(N->getOperand(1));
-    if (!RN || !RN->getReg().isPhysical() || RN->getReg() != SystemZ::CC)
-      return false;
-    return true;
+    SDNode *N1 = CCOp1.getNode(), *N2 = CCOp2.getNode();
+    return N1 && N2 && N1 == N2;
   };
 
   auto *TrueVal = dyn_cast<ConstantSDNode>(N->getOperand(0));
@@ -8606,12 +8572,12 @@ SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
   // Not yet encountered the case where both operands not constants,
   // that case can be handled by removing this condition.
   if (!((TrueVal != nullptr) ^ (FalseVal != nullptr)))
-    return std::nullopt;
+    return SDValue();
 
   SDValue CCOp = TrueVal ? N->getOperand(1) : N->getOperand(0);
   auto *CCOpNode = CCOp.getNode();
   if (!CCOpNode || CCOpNode->getOpcode() != SystemZISD::SELECT_CCMASK)
-    return std::nullopt;
+    return SDValue();
 
   auto *TrueValOp = dyn_cast<ConstantSDNode>(CCOpNode->getOperand(0));
   auto *FalseValOp = dyn_cast<ConstantSDNode>(CCOpNode->getOperand(1));
@@ -8622,13 +8588,13 @@ SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
     if (FalseValOp && TrueVal->getZExtValue() == FalseValOp->getZExtValue())
       InvertOp2 = !InvertOp2;
     else if (!TrueValOp || TrueVal->getZExtValue() != TrueValOp->getZExtValue())
-      return std::nullopt;
+      return SDValue();
   } else if (FalseVal) {
     if (TrueValOp && FalseVal->getZExtValue() == TrueValOp->getZExtValue())
       InvertOp1 = !InvertOp1;
     else if (!FalseValOp ||
              FalseVal->getZExtValue() != FalseValOp->getZExtValue())
-      return std::nullopt;
+      return SDValue();
   }
 
   auto *CCValidNode = dyn_cast<ConstantSDNode>(N->getOperand(2));
@@ -8636,7 +8602,7 @@ SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
   auto *CCValidOp = dyn_cast<ConstantSDNode>(CCOpNode->getOperand(2));
   auto *CCMaskOp = dyn_cast<ConstantSDNode>(CCOpNode->getOperand(3));
   if (!CCValidNode || !CCMaskNode || !CCMaskOp || !CCValidOp)
-    return std::nullopt;
+    return SDValue();
 
   int CCValid = CCValidNode->getZExtValue();
   int CCMaskValOp = CCMaskOp->getZExtValue();
@@ -8644,24 +8610,24 @@ SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
   int CCMask = CCMaskNode->getZExtValue();
   bool IsUnionMask = CCMask == SystemZ::CCMASK_CMP_EQ;
   if (CCValid != SystemZ::CCMASK_ICMP)
-    return std::nullopt;
+    return SDValue();
 
   SDValue CCReg = N->getOperand(4);
   SDValue CCRegOp = CCOpNode->getOperand(4);
   // Combine current select_cc.
-  if (combineSRL_IPM_CCMask(CCReg, CCValid, CCMask)) {
+  if (combineCCMask(CCReg, CCValid, CCMask)) {
     if (InvertOp1)
       CCMask ^= SystemZ::CCMASK_ANY;
     // There are two scenarios here.
     // Case 1. Inner (ICMP (SELECT_CCMASK)) has not already been combined into
     // SELECT_CCMASK. Compute  CCMask after optimization.
     // Case 2. Inner (ICMP (SELECT_CCMASK)) already been combined into
-    // SELECT_CCMASK. Check for isCCOperand. In this case we will not know
+    // SELECT_CCMASK. Check for isSameCCIPMOp. In this case we will not know
     // original CCMask, but if only one bit is set in CCMaskValOp, that means
     // original CCMask was SystemZ::CCMASK_CMP_EQ.
-    if (!combineSRL_IPM_CCMask(CCRegOp, CCValidValOp, CCMaskValOp) &&
-        !isCCOperand(CCRegOp.getNode()))
-      return std::nullopt;
+    if (/*!combineCCMask(CCRegOp, CCValidValOp, CCMaskValOp) &&*/
+        !isSameCCIPMOp(CCReg, CCRegOp, CCValidValOp))
+      return SDValue();
     // If outer SELECT_CCMASK is CCMASK_CMP_EQ or single bit is set in
     // CCMaskValOp(inner SELECT_CCMASK is CCMASK_CMP_EQ).
     bool OnlyOneBitSet = CCMaskValOp && !(CCMaskValOp & (CCMaskValOp - 1));
@@ -8692,7 +8658,7 @@ SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
         DAG.getTargetConstant(CCValid, SDLoc(N), MVT::i32),
         DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32), CCRegOp);
   }
-  return std::nullopt;
+  return SDValue();
 }
 
 // Cost of merging conditions versus splitting them into multiple branches.
@@ -8705,6 +8671,7 @@ SystemZTargetLowering::getJumpConditionMergingParams(Instruction::BinaryOps Opc,
     const Value *RHSVal;
     const APInt *RHSC;
     if (const auto *I = dyn_cast<Instruction>(V)) {
+      // PatternMatch.h provides concise tree-based pattern matching of LLVM IR.
       if (match(I->getOperand(0), m_And(m_Value(RHSVal), m_APInt(RHSC))) ||
           match(I, m_Cmp(m_Value(RHSVal), m_APInt(RHSC)))) {
         if (const auto *CB = dyn_cast<CallBase>(RHSVal)) {
@@ -8754,11 +8721,19 @@ SDValue SystemZTargetLowering::combineBR_CCMASK(
 
 SDValue SystemZTargetLowering::combineSELECT_CCMASK(
     SDNode *N, DAGCombinerInfo &DCI) const {
-  // Try to combine select_cc with select_cc for flag output operand.
-  // select_cc may have one of True/Flase Operand SDValue.
-  std::optional<SDValue> Res = combineSELECT_CC_CCIPMMask(N, DCI);
-  if (Res.has_value())
-    return Res.value();
+  // Try to combine two select_cc if the following two conditions are met.
+  // 1. The subtree select_cc has already been combined/evaluated for the
+  // flag output operand srl/ipm sequence.
+  // 2. One of the True/False operands of the select_cc is not constant.
+  // If so, take an early exit, returning the combined select_cc with the
+  // computed CCMask.
+  // Why not apply the same logic in combineBR_CCMASK above? The call to
+  // combineSELECT_CC_CCIPMMask could be added there as well, but it would
+  // never be used, since that path calls combineSELECT_CCMASK to combine
+  // the two select_cc nodes.
+  SDValue Res = combineSELECT_CC_CCIPMMask(N, DCI);
+  if (Res != SDValue())
+    return Res;
 
   SelectionDAG &DAG = DCI.DAG;
 
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index 70c742839b3bd..22bd015dc2d81 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -768,11 +768,8 @@ class SystemZTargetLowering : public TargetLowering {
   SDValue combineINT_TO_FP(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineBSWAP(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineBR_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
-  std::optional<SDValue> combineBR_CCJoinIPMMask(SDNode *N,
-                                                 DAGCombinerInfo &DCI) const;
   SDValue combineSELECT_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
-  std::optional<SDValue> combineSELECT_CC_CCIPMMask(SDNode *N,
-                                                    DAGCombinerInfo &DCI) const;
+  SDValue combineSELECT_CC_CCIPMMask(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineGET_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineIntDIVREM(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineINTRINSIC(SDNode *N, DAGCombinerInfo &DCI) const;
diff --git a/llvm/test/CodeGen/SystemZ/htm-intrinsics.ll b/llvm/test/CodeGen/SystemZ/htm-intrinsics.ll
index c6ee8042a5f2a..07fbed9bd0dd7 100644
--- a/llvm/test/CodeGen/SystemZ/htm-intrinsics.ll
+++ b/llvm/test/CodeGen/SystemZ/htm-intrinsics.ll
@@ -90,7 +90,7 @@ define i32 @test_tbegin_nofloat4(i32 %pad, ptr %ptr) {
 ; CHECK: tbegin 0, 65292
 ; CHECK: ipm %r2
 ; CHECK: srl %r2, 28
-; CHECK: ciblh %r2, 2, 0(%r14)
+; CHECK: bnhr %r14
 ; CHECK: mvhi 0(%r3), 0
 ; CHECK: br %r14
   %res = call i32 @llvm.s390.tbegin.nofloat(ptr null, i32 65292)
@@ -219,7 +219,7 @@ define i32 @test_tend2(i32 %pad, ptr %ptr) {
 ; CHECK: tend
 ; CHECK: ipm %r2
 ; CHECK: srl %r2, 28
-; CHECK: ciblh %r2, 2, 0(%r14)
+; CHECK: bnhr %r14
 ; CHECK: mvhi 0(%r3), 0
 ; CHECK: br %r14
   %res = call i32 @llvm.s390.tend()
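
For reference, a minimal C++ example of the kind of flag-output inline asm
this series targets (illustrative only, not taken from the patch's test
files; the function name and the asm body are assumptions). With the CC-mask
combining above, the cc == 2 test is expected to lower to a single branch on
the condition code, much like the ciblh -> bnhr change in the htm-intrinsics
checks, instead of being rematerialized through an ipm/srl/compare sequence:

  // Illustrative only: compare two integers with an inline asm statement
  // that also reports the SystemZ condition code through the "=@cc"
  // flag output operand added by this series.
  int isHigh(int x, int y) {
    int cc;
    asm("cr %1,%2"           // compare x with y, setting CC
        : "=@cc"(cc)         // CC value (0..3) as a flag output
        : "d"(x), "d"(y));
    return cc == 2 ? 1 : 0;  // CC == 2: first operand is high
  }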

>From c5c34d36f752cce9a7b0e96f8132f41b847acf11 Mon Sep 17 00:00:00 2001
From: anoopkg6 <anoopkg6 at github.com>
Date: Tue, 24 Jun 2025 12:21:05 +0200
Subject: [PATCH 09/12] Added patterns for XOR, OR, TM, AND, ICMP to
 PerformDAGCombine and simplified combineCCMask as a result.

---
 .../Target/SystemZ/SystemZISelLowering.cpp    | 980 +++++++++---------
 llvm/lib/Target/SystemZ/SystemZISelLowering.h |   5 +
 .../flag_output_operand_ccmixed_eq_noteq.ll   |  23 +-
 3 files changed, 520 insertions(+), 488 deletions(-)

diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index a17f2fb300940..52eea424dd210 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -802,7 +802,10 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                        ISD::SREM,
                        ISD::UREM,
                        ISD::INTRINSIC_VOID,
-                       ISD::INTRINSIC_W_CHAIN});
+                       ISD::INTRINSIC_W_CHAIN,
+                       ISD::AND,
+                       ISD::OR,
+                       ISD::XOR});
 
   // Handle intrinsics.
   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
@@ -8767,355 +8770,118 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     CCReg = IPM->getOperand(0);
     return true;
   };
-  // Check if select_cc has already been combined and uses the same ipm/cc
-  // as CCOp and return evaluated mask CCMaskVal. (SELECT_CCMASK (CC)).
-  const auto isSameCCIPMOp = [](SDValue &CCOp, SDNode *N, int &CCValidVal,
-                                int &CCMaskVal) {
-    if (!N || N->getOpcode() != SystemZISD::SELECT_CCMASK)
-      return false;
-    auto *CCValidNode = dyn_cast<ConstantSDNode>(N->getOperand(2));
-    auto *CCMaskNode = dyn_cast<ConstantSDNode>(N->getOperand(3));
-    if (!CCValidNode || !CCMaskNode)
-      return false;
 
-    CCValidVal = CCValidNode->getZExtValue();
-    // Already been combined.
-    if (CCValidVal != SystemZ::CCMASK_ANY)
-      return false;
-    CCMaskVal = CCMaskNode->getZExtValue();
-    SDValue CCRegOp = N->getOperand(4);
-    auto *CCOpNode = CCOp.getNode(), *CCRegOpNode = CCRegOp.getNode();
-    // Uses the same ipm/cc.
-    return CCOpNode && CCRegOpNode && CCOpNode == CCRegOpNode;
-  };
   auto *CCNode = CCReg.getNode();
   if (!CCNode)
     return false;
 
-  // Optimize (TM (IPM (CC)))
-  if (CCNode->getOpcode() == SystemZISD::TM) {
+  // Check (SRL (IPM)) pattern and update CCReg if true.
+  if (isSRL_IPM_CCSequence(CCNode))
+    return true;
+
+  // This code is common to both combineBR_CCMASK and combineSELECT_CCMASK.
+  // Two cases of combining an already combined select_cc:
+  // a. Combining a br_cc with an already combined select_cc.
+  // b. Combining select_cc_a with an already combined select_cc_b, where
+  // select_cc_a must have both TrueVal and FalseVal constant.
+  // We return the nested CCMask and the updated link to CCReg.
+  // The cases with a non-constant TrueVal/FalseVal have already been
+  // handled in combineSELECT_CC_CCIPMMask.
+  if (CCNode->getOpcode() == SystemZISD::SELECT_CCMASK) {
     bool Invert = false;
-    if (CCMask == SystemZ::CCMASK_TM_SOME_1)
-      Invert = !Invert;
-    auto *N = CCNode->getOperand(0).getNode();
-    auto Shift = dyn_cast<ConstantSDNode>(CCNode->getOperand(1));
-    if (!N || !Shift)
-      return false;
-    if (N->getOpcode() == SystemZISD::IPM) {
-      auto ShiftVal = Shift->getZExtValue();
-      if (ShiftVal == (1 << SystemZ::IPM_CC))
-        CCMask = SystemZ::CCMASK_CMP_GE;
-      if (Invert)
-        CCMask ^= CCValid;
-      // Return the updated CCReg link.
-      CCReg = N->getOperand(0);
-      return true;
-    } else if (N->getOpcode() == ISD::XOR) {
-      // Optimize (TM (XOR (OP1 OP2))).
-      auto *XOROp1 = N->getOperand(0).getNode();
-      auto *XOROp2 = N->getOperand(1).getNode();
-      if (!XOROp1 || !XOROp2)
+    // Outer select_cc is TM.
+    if (CCValid == SystemZ::CCMASK_TM) {
+      if (CCMask == SystemZ::CCMASK_TM_SOME_1)
+        Invert = !Invert;
+      else if (CCMask != SystemZ::CCMASK_TM_ALL_0)
         return false;
-      // OP1. (SELECT_CCMASK (ICMP (SRL (IPM (CC))))).
-      // OP2. (SRL (IPM (CC))).
-      if (XOROp1->getOpcode() == SystemZISD::SELECT_CCMASK /*&&
-          isSRL_IPM_CCSequence(XOROp2)*/) {
-        auto *CCValid1 = dyn_cast<ConstantSDNode>(XOROp1->getOperand(2));
-        auto *CCMask1 = dyn_cast<ConstantSDNode>(XOROp1->getOperand(3));
-        SDValue XORReg = XOROp1->getOperand(4);
-        if (!CCValid1 || !CCMask1)
-          return false;
-        int CCValidVal = CCValid1->getZExtValue();
-        int CCMaskVal = CCMask1->getZExtValue();
-        if (combineCCMask(XORReg, CCValidVal, CCMaskVal)) {
-          // CC == 0 || CC == 2 for bit 28 Test Under Mask.
-          CCMask = SystemZ::CCMASK_CMP_GE;
-          CCMask ^= CCMaskVal;
-          if (Invert)
-            CCMask ^= CCValid;
-          CCReg = XORReg;
-          return true;
-        }
-      }
-    }
-  }
-  // Optimize (AND (SRL (IPM (CC)))).
-  if (CCNode->getOpcode() == ISD::AND) {
-    auto *N = CCNode->getOperand(0).getNode();
-    if (!isSRL_IPM_CCSequence(N))
+    } else if (CCValid != SystemZ::CCMASK_ICMP)
       return false;
-    auto *ANDConst = dyn_cast<ConstantSDNode>(CCNode->getOperand(1));
-    if (!ANDConst)
+    auto *CCNValid = dyn_cast<ConstantSDNode>(CCNode->getOperand(2));
+    auto *CCNMask = dyn_cast<ConstantSDNode>(CCNode->getOperand(3));
+    // Check if select_cc has already been combined.
+    if (!CCNValid || !CCNMask ||
+        CCNValid->getZExtValue() != SystemZ::CCMASK_ANY)
       return false;
-    // Bit 28 false (CC == 0) || (CC == 2).
-    // Caller can invert it depending on CCmask there.
-    if (ANDConst->getZExtValue() == 1) {
-      CCMask = SystemZ::CCMASK_0 | SystemZ::CCMASK_2;
-      CCValid = SystemZ::CCMASK_ANY;
-      return true;
-    }
-    return false;
+    CCValid = SystemZ::CCMASK_ANY;
+    CCMask = CCNMask->getZExtValue();
+    if (Invert)
+      CCMask ^= SystemZ::CCMASK_ANY;
+    // Update CCReg link.
+    CCReg = CCNode->getOperand(4);
+    return true;
   }
-  // (SELECT_CCMASK (ICMP (SRL (IPM (CC)))))
-  if (CCNode->getOpcode() == SystemZISD::SELECT_CCMASK) {
-    auto *CCValidNode = dyn_cast<ConstantSDNode>(CCNode->getOperand(2));
-    auto *CCMaskNode = dyn_cast<ConstantSDNode>(CCNode->getOperand(3));
-    if (!CCValidNode || !CCMaskNode)
-      return false;
 
-    int CCValidVal = CCValidNode->getZExtValue();
-    int CCMaskVal = CCMaskNode->getZExtValue();
-    SDValue CCRegOp = CCNode->getOperand(4);
-    if (combineCCMask(CCRegOp, CCValidVal, CCMaskVal)) {
-      CCMask = CCMaskVal;
-      CCValid = SystemZ::CCMASK_ANY;
-      CCReg = CCRegOp;
-      return true;
-    }
-    return false;
-  }
+  // The rest of the code handles sequences starting with SystemZISD::ICMP.
+  // We have a SELECT_CCMASK or BR_CCMASK comparing the condition code
+  // set by the CCReg instruction using the CCValid / CCMask masks.
+  // If the CCReg instruction is itself an ICMP testing the condition
+  // code set by some other instruction, see whether we can directly
+  // use that condition code.
 
-  // Both oerands of XOR are (SELECT_CCMASK (ICMP (SRL (IPM (CC))))).
-  if (CCNode->getOpcode() == ISD::XOR) {
-    if (isa<ConstantSDNode>(CCNode->getOperand(0)) ||
-        isa<ConstantSDNode>(CCNode->getOperand(1)))
-      return false;
-    auto *XOROp1 = CCNode->getOperand(0).getNode();
-    auto *XOROp2 = CCNode->getOperand(1).getNode();
-    if (!XOROp1 || !XOROp2)
-      return false;
-    // Both Operands are select_cc.
-    if (XOROp1->getOpcode() == SystemZISD::SELECT_CCMASK &&
-        XOROp2->getOpcode() == SystemZISD::SELECT_CCMASK) {
-      auto *CCValid1 = dyn_cast<ConstantSDNode>(XOROp1->getOperand(2));
-      auto *CCMask1 = dyn_cast<ConstantSDNode>(XOROp1->getOperand(3));
-      auto *CCValid2 = dyn_cast<ConstantSDNode>(XOROp2->getOperand(2));
-      auto *CCMask2 = dyn_cast<ConstantSDNode>(XOROp2->getOperand(3));
-      if (!CCValid1 || !CCMask1 || !CCValid2 || !CCMask2)
-        return false;
-      int CCValidVal1 = CCValid1->getZExtValue();
-      int CCMaskVal1 = CCMask1->getZExtValue();
-      int CCValidVal2 = CCValid2->getZExtValue();
-      int CCMaskVal2 = CCMask2->getZExtValue();
-      SDValue CCReg1 = XOROp1->getOperand(4);
-      SDValue CCReg2 = XOROp2->getOperand(4);
-      if (!combineCCMask(CCReg1, CCValidVal1, CCMaskVal1) ||
-          !combineCCMask(CCReg2, CCValidVal2, CCMaskVal2))
-        return false;
-      CCMask = CCMaskVal1 ^ CCMaskVal2;
-      CCReg = CCReg1;
-      CCValid = SystemZ::CCMASK_ANY;
-      return true;
-    }
+  // Verify that we have an ICMP against some constant.
+  if (CCValid != SystemZ::CCMASK_ICMP)
     return false;
-  }
-
-  // Rest of the code handle  ICMP cases.
-  // Handle the case (ICMP (OP (SRL (IPM (CC)))))
-  if (!CCNode || CCNode->getOpcode() != SystemZISD::ICMP)
+  if (CCNode->getOpcode() != SystemZISD::ICMP)
     return false;
-  auto *LHS = CCNode->getOperand(0).getNode();
-  auto *RHS = dyn_cast<ConstantSDNode>(CCNode->getOperand(1));
-  if (!LHS || LHS->getOpcode() == ISD::Constant)
+  auto *CompareLHS = CCNode->getOperand(0).getNode();
+  auto *CompareRHS = dyn_cast<ConstantSDNode>(CCNode->getOperand(1));
+  if (!CompareRHS)
     return false;
 
-  // (BR_CC (ICMP (Op1 Op2))), Op1 Op2 will have (SRL (IPM (CC))) sequence.
-  // SystemZ::ICMP second operand is not constant.
-  if (!RHS) {
-    SDValue CmpOp1 = CCNode->getOperand(0);
-    SDValue CmpOp2 = CCNode->getOperand(1);
-    auto *CmpNode1 = CmpOp1.getNode(), *CmpNode2 = CmpOp2.getNode();
-    if (!CmpNode1 || !CmpNode2)
-      return false;
-    if (CmpNode1->getOpcode() == SystemZISD::SELECT_CCMASK ||
-        CmpNode2->getOpcode() == SystemZISD::SELECT_CCMASK) {
-      SDValue CmpOp =
-          CmpNode1->getOpcode() == SystemZISD::SELECT_CCMASK ? CmpOp2 : CmpOp1;
-      SDNode *SelectCC = CmpNode1->getOpcode() == SystemZISD::SELECT_CCMASK
-                             ? CmpNode1
-                             : CmpNode2;
-      int CmpCCValid = CCValid, SelectCCValid = CCValid;
-      int CmpCCMask = CCMask, SelectCCMask = CCMask;
-      bool IsOp1 = combineCCMask(CmpOp, CmpCCValid, CmpCCMask);
-      bool IsOp2 = isSameCCIPMOp(CmpOp, SelectCC, SelectCCValid, SelectCCMask);
-      if (IsOp1 && IsOp2) {
-        CCMask = CmpCCMask ^ SelectCCMask;
-        CCReg = CmpOp;
-        CCValid = SystemZ::CCMASK_ANY;
-        return true;
-      }
-    }
-    return false;
-  }
-  int CmpVal = RHS->getZExtValue();
-  // (BR_CC (ICMP (SELECT_CCMASK (CC))))
-  if (LHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
-    int CCVal = RHS->getZExtValue();
-    int Mask = CCMaskForICmpEQCCVal(CCVal);
-    bool Invert = false;
-    if (CCMask == SystemZ::CCMASK_CMP_NE)
-      Invert = !Invert;
-    SDValue NewCCReg = CCNode->getOperand(0);
-    if (combineCCMask(NewCCReg, CCValid, CCMask)) {
-      CCMask |= Mask;
-      if (Invert)
-        CCMask ^= SystemZ::CCMASK_ANY;
-      CCReg = NewCCReg;
+  // Optimize the case where LHS is (ICMP (SRL (IPM))).
+  int CmpVal = CompareRHS->getZExtValue();
+  if (isSRL_IPM_CCSequence(CompareLHS)) {
+    if (convertCCValToCCMask(CmpVal)) {
       CCValid = SystemZ::CCMASK_ANY;
       return true;
-    } else if (CCMask == SystemZ::CCMASK_CMP_NE ||
-               CCMask != SystemZ::CCMASK_CMP_EQ) {
-      // Original combineCCMask.
-      // Verify that the ICMP compares against one of select values.
-      auto *TrueVal = dyn_cast<ConstantSDNode>(LHS->getOperand(0));
-      if (!TrueVal)
-        return false;
-      auto *FalseVal = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
-      if (!FalseVal)
-        return false;
-      if (RHS->getAPIntValue() == FalseVal->getAPIntValue())
-        Invert = !Invert;
-      else if (RHS->getAPIntValue() != TrueVal->getAPIntValue())
-        return false;
-
-      // Compute the effective CC mask for the new branch or select.
-      auto *NewCCValid = dyn_cast<ConstantSDNode>(LHS->getOperand(2));
-      auto *NewCCMask = dyn_cast<ConstantSDNode>(LHS->getOperand(3));
-      if (!NewCCValid || !NewCCMask)
-        return false;
-      CCValid = NewCCValid->getZExtValue();
-      CCMask = NewCCMask->getZExtValue();
-      if (Invert)
-        CCMask ^= CCValid;
-
-      // Return the updated CCReg link.
-      CCReg = LHS->getOperand(4);
-      return true;
     }
     return false;
   }
-  // (BR_CC (ICMP OR ((SRL (IPM (CC))) (SELECT_CCMASK (CC)))))
-  if (LHS->getOpcode() == ISD::OR) {
-    bool Invert = false;
-    if (CCMask == SystemZ::CCMASK_CMP_NE)
-      Invert = !Invert;
-    SDValue OrOp1 = LHS->getOperand(0);
-    SDValue OrOp2 = LHS->getOperand(1);
-    int CCValid1 = CCValid, CCValid2 = CCValid;
-    int NewCCMask1 = CCMask, NewCCMask2 = CCMask, NewCCMask = CCMask;
-    if (!isa<ConstantSDNode>(OrOp1) && !isa<ConstantSDNode>(OrOp2)) {
-      bool IsOp1 = combineCCMask(OrOp1, CCValid1, NewCCMask1);
-      bool IsOp2 = combineCCMask(OrOp2, CCValid2, NewCCMask2);
-      if (!IsOp1 && !IsOp2) {
-        return false;
-      }
-      if (IsOp1 && IsOp2) {
-        NewCCMask = NewCCMask1 | NewCCMask2;
-        bool IsEqualCmpVal = NewCCMask == CmpVal;
-        if ((CCMask == SystemZ::CCMASK_CMP_NE && IsEqualCmpVal) ||
-            (CCMask == SystemZ::CCMASK_CMP_EQ && !IsEqualCmpVal))
-          NewCCMask ^= SystemZ::CCMASK_ANY;
-        CCReg = OrOp1;
-        CCMask = NewCCMask;
-        CCValid = SystemZ::CCMASK_ANY;
-        return true;
-      }
-    } else if (isa<ConstantSDNode>(OrOp2)) {
-      if (isSRL_IPM_CCSequence(OrOp1.getNode())) {
-        auto *OrConst = dyn_cast<ConstantSDNode>(OrOp2);
-        int OrConstVal = OrConst->getZExtValue();
-        if (!OrConst || (OrConstVal & 0x3))
-          return false;
-        // setullt unsigned(-2), mask = 0x1100
-        // setugt unsigned(-4), mask = 0x0011
-        CmpVal &= 0x3;
-        if (convertCCValToCCMask(CmpVal)) {
-          CCValid = SystemZ::CCMASK_ANY;
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-  // (BR_CC (ICMP AND ((SRL (IPM (CC))) (SELECT_CCMASK (CC)))))
-  if (LHS->getOpcode() == ISD::AND) {
-    bool Invert = false;
-    if (CCMask == SystemZ::CCMASK_CMP_NE)
-      Invert = !Invert;
-    SDValue AndOp1 = LHS->getOperand(0);
-    SDValue AndOp2 = LHS->getOperand(1);
-    int NewCCMask1 = CCMask, NewCCMask2 = CCMask, NewCCMask;
-    int CCValid1 = CCValid, CCValid2 = CCValid;
-    if (!isa<ConstantSDNode>(AndOp1) && !isa<ConstantSDNode>(AndOp2)) {
-      bool IsOp1 = combineCCMask(AndOp1, CCValid1, NewCCMask1);
-      bool IsOp2 = combineCCMask(AndOp2, CCValid2, NewCCMask2);
-      if (!IsOp1 && !IsOp2)
+
+  // Optimize the case where LHS is (ICMP (OR (SRL (IPM (CC))))).
+  // t24: i32 = or disjoint t21, Constant:i32<-4>
+  // t40: i32 = SystemZISD::ICMP t24, Constant:i32<-2>, TargetConstant:i32<1>
+  if (CompareLHS->getOpcode() == ISD::OR) {
+    SDValue OrOp0 = CompareLHS->getOperand(0);
+    SDValue OrOp1 = CompareLHS->getOperand(1);
+    // Op0 is (SRL (IPM (CC))). Op1 is a constant.
+    if (isSRL_IPM_CCSequence(OrOp0.getNode())) {
+      auto *OrConst = dyn_cast<ConstantSDNode>(OrOp1);
+      // Op1 is Constant:i32<-4>.
+      if (!OrConst || (OrConst->getZExtValue() & 0x3))
         return false;
-      if (IsOp1 && IsOp2) {
-        NewCCMask = NewCCMask1 & NewCCMask2;
-        bool IsEqualCmpVal = NewCCMask == CmpVal;
-        if ((CCMask == SystemZ::CCMASK_CMP_NE && IsEqualCmpVal) ||
-            (CCMask == SystemZ::CCMASK_CMP_EQ && !IsEqualCmpVal))
-          NewCCMask ^= SystemZ::CCMASK_ANY;
-        CCMask = NewCCMask;
-        CCReg = AndOp1;
-        CCValid = SystemZ::CCMASK_ANY;
-        return true;
-      } else {
-        if (IsOp1 && isSRL_IPM_CCSequence(AndOp2.getNode()))
-          NewCCMask = NewCCMask1;
-        else if (isSRL_IPM_CCSequence(AndOp2.getNode()) && IsOp2)
-          NewCCMask = NewCCMask2;
-        else
-          return false;
-        // Bit 29 set => CC == 2 || CC == 3.
-        if ((NewCCMask & 0x3) == 2)
-          NewCCMask = SystemZ::CCMASK_2 | SystemZ::CCMASK_3;
-        // Bit 28 set => CC == 1 || CC == 3.
-        else if ((NewCCMask & 0x3) == 1)
-          NewCCMask = SystemZ::CCMASK_1 | SystemZ::CCMASK_3;
-        int CCVal = RHS->getZExtValue();
-        int Mask = CCMaskForICmpEQCCVal(CCVal);
-        CCMask = Mask | NewCCMask;
-        if (Invert ^ CmpVal)
-          CCMask ^= SystemZ::CCMASK_ANY;
+      // setult unsigned(-2) or setugt unsigned(-3).
+      // mask = 0b1100 => CC != 2 && CC != 3.
+      CmpVal &= 0x3;
+      if (convertCCValToCCMask(CmpVal)) {
         CCValid = SystemZ::CCMASK_ANY;
         return true;
       }
     }
-    return false;
   }
-  // Optimize the case where LHS is (ICMP (SRL (IPM))).
-  if (isSRL_IPM_CCSequence(LHS)) {
-    unsigned CCVal = RHS->getZExtValue();
-    if (convertCCValToCCMask(CCVal)) {
-      CCValid = SystemZ::CCMASK_ANY;
-      return true;
-    }
-    return false;
-  }
-  if (LHS->getOpcode() == ISD::ADD) {
-    if (isSRL_IPM_CCSequence(LHS->getOperand(0).getNode())) {
-      int CCVal = RHS->getZExtValue();
+
+  // Optimize the case where LHS is (ICMP (ADD (SRL (IPM (CC))))).
+  if (CompareLHS->getOpcode() == ISD::ADD) {
+    if (isSRL_IPM_CCSequence(CompareLHS->getOperand(0).getNode())) {
       // (unsigned) CCVal - 1 or (unsigned) CCVal - 3 Inverted.
-      // CCMask == SystemZ::CCMASK_CMP_LT, CCVal <= 2 => CC == 1 || CC == 2.
-      // CCMask == SystemZ::CCMASK_CMP_LT and CCVal <= 3 =>
-      // CC == 1 || CC == 2 || CC == 3.
-      auto *AddConstOp = dyn_cast<ConstantSDNode>((LHS->getOperand(1)));
+      // CCVal <= 2 => CC == 1 || CC == 2.
+      // CCVal <= 3 => CC == 1 || CC == 2 || CC == 3.
+      // (CCVal - AddConst) < CmpVal.
+      auto *AddConstOp = dyn_cast<ConstantSDNode>((CompareLHS->getOperand(1)));
       int AddConst = AddConstOp->getZExtValue();
+      if ((AddConst != -1) && (AddConst != -3))
+        return false;
       bool Invert = false;
-      if (CCVal < 0) {
+      if (CmpVal < 0) {
         Invert = !Invert;
-        // setult unsigned(-2), AddConst == -3.
         AddConst = AddConst & 0x3;
       } else
         AddConst = ~AddConst + 1;
-      // As original CCMask of of SELECT_CCMASK/BR_CCMASK does not have
-      // <= or >=.
-      CCVal &= 0x3;
-      CCVal += AddConst;
-      if (convertCCValToCCMask(CCVal)) {
-        // CCVal can not zero here.
+      CmpVal &= 0x3;
+      CmpVal += AddConst;
+      if (convertCCValToCCMask(CmpVal)) {
+        // CCVal > 0.
         CCMask ^= SystemZ::CCMASK_CMP_EQ;
         if (Invert)
           CCMask ^= SystemZ::CCMASK_ANY;
@@ -9126,55 +8892,48 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     return false;
   }
 
-  // Optimize (ICMP (XOR (OP1 OP2))), OP1 or OP2 could be XOR again.
-  // One or both of operands could be (SELECT_CCMASK (ICMP (SRL (IPM (CC))))).
-  if (LHS->getOpcode() == ISD::XOR) {
-    SDValue XORReg = CCReg->getOperand(0);
+  // Optimize the case where CompareLHS is a SELECT_CCMASK.
+  if (CompareLHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
+    // Verify that we have an appropriate mask for an EQ or NE comparison.
     bool Invert = false;
     if (CCMask == SystemZ::CCMASK_CMP_NE)
       Invert = !Invert;
-    // If both the operands are select_cc.
-    if (combineCCMask(XORReg, CCValid, CCMask)) {
-      CCReg = XORReg;
-      CCValid = SystemZ::CCMASK_ANY;
-      return true;
-    }
-    // Handle the case when one of the operand is select_cc and other operand
-    // could be xor again having both operands as select_cc.
-    auto *XOROp1 = LHS->getOperand(0).getNode();
-    auto *XOROp2 = LHS->getOperand(1).getNode();
-    if (!XOROp1 || !XOROp2)
+    else if (CCMask != SystemZ::CCMASK_CMP_EQ)
       return false;
-    if (XOROp1->getOpcode() == SystemZISD::SELECT_CCMASK ||
-        XOROp2->getOpcode() == SystemZISD::SELECT_CCMASK) {
-      auto *XOROp =
-          XOROp1->getOpcode() == SystemZISD::SELECT_CCMASK ? XOROp1 : XOROp2;
-      auto *CCMaskNode = dyn_cast<ConstantSDNode>(XOROp->getOperand(3));
-      auto *CCValidNode = dyn_cast<ConstantSDNode>(XOROp->getOperand(2));
-      if (!CCValidNode || !CCMaskNode)
-        return false;
-      int CCValidVal = CCValidNode->getZExtValue();
-      int CCMaskVal = CCMaskNode->getZExtValue();
-      SDValue XORReg1 = XOROp->getOperand(4);
-      SDValue XORReg2 = LHS->getOperand(1);
-      int CCMaskVal1 = CCMaskVal, CCMaskVal2 = CCMaskVal;
-      if (combineCCMask(XORReg1, CCValidVal, CCMaskVal1) &&
-          combineCCMask(XORReg2, CCValidVal, CCMaskVal2)) {
-        CCMask = CCMaskVal1 ^ CCMaskVal2;
-        CCReg = XORReg1;
-        CCValid = SystemZ::CCMASK_ANY;
-        return true;
-      }
-    }
+
+    // Verify that the ICMP compares against one of the select values.
+    auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
+    if (!TrueVal)
+      return false;
+    auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
+    if (!FalseVal)
+      return false;
+    if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
+      Invert = !Invert;
+    else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())
+      return false;
+
+    // Compute the effective CC mask for the new branch or select.
+    auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
+    auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
+    if (!NewCCValid || !NewCCMask)
+      return false;
+    CCValid = NewCCValid->getZExtValue();
+    CCMask = NewCCMask->getZExtValue();
+    if (Invert)
+      CCMask ^= CCValid;
+
+    // Return the updated CCReg link.
+    CCReg = CompareLHS->getOperand(4);
+    return true;
   }
 
-  // Original combineCCMask.
-  // Optimize the case where RHS is (SRA (SHL (IPM))).
-  if (LHS->getOpcode() == ISD::SRA) {
-    auto *SRACount = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
+  // Optimize the case where CompareLHS is (SRA (SHL (IPM))).
+  if (CompareLHS->getOpcode() == ISD::SRA) {
+    auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
     if (!SRACount || SRACount->getZExtValue() != 30)
       return false;
-    auto *SHL = LHS->getOperand(0).getNode();
+    auto *SHL = CompareLHS->getOperand(0).getNode();
     if (SHL->getOpcode() != ISD::SHL)
       return false;
     auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
@@ -9185,10 +8944,10 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
       return false;
 
     // Avoid introducing CC spills (because SRA would clobber CC).
-    if (!LHS->hasOneUse())
+    if (!CompareLHS->hasOneUse())
       return false;
     // Verify that the ICMP compares against zero.
-    if (RHS->getZExtValue() != 0)
+    if (CompareRHS->getZExtValue() != 0)
       return false;
 
     // Compute the effective CC mask for the new branch or select.
@@ -9198,24 +8957,12 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     CCReg = IPM->getOperand(0);
     return true;
   }
+
   return false;
 }
 
-// Combine and compute CCMask two select_cc for flag output operand where
-// one of operands TrueVal/FalseVal is not constant and subtree select_cc has
-// already been combined/evaluated.
-// This functions can be integerated in combineCCMask with following changes to
-// the design.
-// 1. combineCCMask function should have argument N and DCI as arguments, as
-// operand 0 and 1(TrueVal/FalseVal) required to check Inverted select_cc with
-// respect to subtree select_cc. which is needed if CCMask needs to be inverted
-// or swapping of Op0 and Op1 is required while creatiing new SDValue.
-// 2. This code has to be integrated in the beginning of combineCCMask so that
-// there is early exit from combineCCMask in case this tranformation is applied
-// without interfereing with combineCCMask combining single select_cc sequence.
-// 3. This function calls combineCCMask to combine outer select_cc. It may be
-// an issue if combineCCMask is converetd into iterative.
-// 4. Function combineCCMask is already a long function.
+// Combine (select_cc_a (select_cc_b)), where one of select_cc_a's TrueVal or
+// FalseVal operands is a nested, already combined select_cc_b.
 SDValue
 SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
@@ -9223,106 +8970,116 @@ SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
   // Check if CCOp1 and CCOp2 refers to the same CC condition node.
   const auto isSameCCIPMOp = [](SDValue &CCOp1, SDValue &CCOp2,
                                 int &CCValidVal) {
-    // Already combined/evaluated sequence.
+    // Already combined sequence.
     if (CCValidVal != SystemZ::CCMASK_ANY)
       return false;
     SDNode *N1 = CCOp1.getNode(), *N2 = CCOp2.getNode();
     return N1 && N2 && N1 == N2;
   };
 
-  auto *TrueVal = dyn_cast<ConstantSDNode>(N->getOperand(0));
-  auto *FalseVal = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  auto *OuterTrueVal = dyn_cast<ConstantSDNode>(N->getOperand(0));
+  auto *OuterFalseVal = dyn_cast<ConstantSDNode>(N->getOperand(1));
+
   // Already handled the case both operands constant in combineCCMask.
-  // Not yet encountered the case where both operands not constants,
+  // The case where both operands are sub-expressions has not come up yet;
   // that case can be handled by removing this condition.
-  if (!((TrueVal != nullptr) ^ (FalseVal != nullptr)))
+  if (!((OuterTrueVal != nullptr) ^ (OuterFalseVal != nullptr)))
     return SDValue();
 
-  SDValue CCOp = TrueVal ? N->getOperand(1) : N->getOperand(0);
-  auto *CCOpNode = CCOp.getNode();
-  if (!CCOpNode || CCOpNode->getOpcode() != SystemZISD::SELECT_CCMASK)
+  SDValue NestedCCOp = OuterTrueVal ? N->getOperand(1) : N->getOperand(0);
+  auto *NestedCCNode = NestedCCOp.getNode();
+  // check if nested select_cc_b has already been combined.
+  if (!NestedCCNode || NestedCCNode->getOpcode() != SystemZISD::SELECT_CCMASK)
     return SDValue();
 
-  auto *TrueValOp = dyn_cast<ConstantSDNode>(CCOpNode->getOperand(0));
-  auto *FalseValOp = dyn_cast<ConstantSDNode>(CCOpNode->getOperand(1));
-  bool InvertOp1 = false, InvertOp2 = false;
-  // Check if outer select_cc and inner select_cc True/False matching or
-  // inverted.
-  if (TrueVal) {
-    if (FalseValOp && TrueVal->getZExtValue() == FalseValOp->getZExtValue())
-      InvertOp2 = !InvertOp2;
-    else if (!TrueValOp || TrueVal->getZExtValue() != TrueValOp->getZExtValue())
+  auto *NestedTrueVal = dyn_cast<ConstantSDNode>(NestedCCNode->getOperand(0));
+  auto *NestedFalseVal = dyn_cast<ConstantSDNode>(NestedCCNode->getOperand(1));
+  if (!NestedTrueVal || !NestedFalseVal)
+    return SDValue();
+  bool Invert = false;
+  // Check whether the outer select_cc_a and nested select_cc_b True/False
+  // values are matching or inverted.
+  if (OuterTrueVal) {
+    // OuterFalseVal points to already combined nested select_cc_b.
+    if (OuterTrueVal->getZExtValue() == NestedFalseVal->getZExtValue())
+      Invert = !Invert; // Inverted.
+    else if (OuterTrueVal->getZExtValue() != NestedTrueVal->getZExtValue())
       return SDValue();
-  } else if (FalseVal) {
-    if (TrueValOp && FalseVal->getZExtValue() == TrueValOp->getZExtValue())
-      InvertOp1 = !InvertOp1;
-    else if (!FalseValOp ||
-             FalseVal->getZExtValue() != FalseValOp->getZExtValue())
+  } else if (OuterFalseVal) {
+    // OuterTrueVal points to already combined nested select_cc_b.
+    if (OuterFalseVal->getZExtValue() == NestedTrueVal->getZExtValue())
+      Invert = !Invert; // Inverted.
+    else if (OuterFalseVal->getZExtValue() != NestedFalseVal->getZExtValue())
       return SDValue();
   }
-
-  auto *CCValidNode = dyn_cast<ConstantSDNode>(N->getOperand(2));
-  auto *CCMaskNode = dyn_cast<ConstantSDNode>(N->getOperand(3));
-  auto *CCValidOp = dyn_cast<ConstantSDNode>(CCOpNode->getOperand(2));
-  auto *CCMaskOp = dyn_cast<ConstantSDNode>(CCOpNode->getOperand(3));
-  if (!CCValidNode || !CCMaskNode || !CCMaskOp || !CCValidOp)
+  auto *OuterCCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
+  auto *OuterCCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
+  auto *NestedCCValid = dyn_cast<ConstantSDNode>(NestedCCOp->getOperand(2));
+  auto *NestedCCMask = dyn_cast<ConstantSDNode>(NestedCCOp->getOperand(3));
+  if (!OuterCCValid || !OuterCCMask || !NestedCCValid || !NestedCCMask)
     return SDValue();
 
-  int CCValid = CCValidNode->getZExtValue();
-  int CCMaskValOp = CCMaskOp->getZExtValue();
-  int CCValidValOp = CCValidOp->getZExtValue();
-  int CCMask = CCMaskNode->getZExtValue();
-  bool IsUnionMask = CCMask == SystemZ::CCMASK_CMP_EQ;
-  if (CCValid != SystemZ::CCMASK_ICMP)
+  int OuterCCValidVal = OuterCCValid->getZExtValue();
+  int OuterCCMaskVal = OuterCCMask->getZExtValue();
+  int NestedCCValidVal = NestedCCValid->getZExtValue();
+  int NestedCCMaskVal = NestedCCMask->getZExtValue();
+  int CCMask = OuterCCMaskVal;
+  SDValue OuterCCReg = N->getOperand(4);
+  SDValue NestedCCReg = NestedCCOp->getOperand(4);
+
+  // Combine two already combined (select_cc_a (select_cc_b)), where TrueVal
+  // of select_cc_a points to select_cc_b. We return select_cc with TrueVal
+  // and FalseVal from select_cc_b with combined CCMask.
+  // One of OuterTrueVal or OuterFalseVal has select_cc_b.
+  if ((OuterTrueVal != nullptr) ^ (OuterFalseVal != nullptr)) {
+    // Both OuterCCValidVal and NestedCCValidVal have already been combined.
+    if (OuterCCValidVal == SystemZ::CCMASK_ANY &&
+        // And both points to the same CC.
+        isSameCCIPMOp(OuterCCReg, NestedCCReg, NestedCCValidVal)) {
+      CCMask |= NestedCCMaskVal;
+      // NestedCCOp has both operands constants.
+      auto Op0 = NestedCCOp->getOperand(0);
+      auto Op1 = NestedCCOp->getOperand(1);
+      // Return combined select_cc.
+      return DAG.getNode(
+          SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0), Op0, Op1,
+          DAG.getTargetConstant(OuterCCValidVal, SDLoc(N), MVT::i32),
+          DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32), NestedCCReg);
+    }
+  }
+  // Now handle the case where outer select_cc_a has not yet been combined.
+  // Combine outer select_cc and check if it corresponds to the same
+  // CC as nested CC.
+  // Outer select_cc has not yet been combined.
+  if (OuterCCValidVal != SystemZ::CCMASK_ICMP ||
+      // Try combining outer select_cc.
+      !combineCCMask(OuterCCReg, OuterCCValidVal, OuterCCMaskVal) ||
+      // Check nested select_cc has already been combined and points to
+      // the same condition code as the outer select_cc.
+      !isSameCCIPMOp(OuterCCReg, NestedCCReg, NestedCCValidVal))
     return SDValue();
+  // Check if nested select_cc original CCMask was CCMASK_CMP_EQ.
+  // Only one bit is set in NestedCCMaskVal for CCMASK_CMP_EQ.
+  bool IsNestedCMP_EQ =
+      NestedCCMaskVal && !(NestedCCMaskVal & (NestedCCMaskVal - 1));
 
-  SDValue CCReg = N->getOperand(4);
-  SDValue CCRegOp = CCOpNode->getOperand(4);
-  // Combine current select_cc.
-  if (combineCCMask(CCReg, CCValid, CCMask)) {
-    if (InvertOp1)
-      CCMask ^= SystemZ::CCMASK_ANY;
-    // There are two scenarios here.
-    // Case 1. Inner (ICMP (SELECT_CCMASK)) has not already been combined into
-    // SELECT_CCMASK. Compute  CCMask after optimization.
-    // Case 2. Inner (ICMP (SELECT_CCMASK)) already been combined into
-    // SELECT_CCMASK. Check for isSameCCIPMOp. In this case we will not know
-    // original CCMask, but if only one bit is set in CCMaskValOp, that means
-    // original CCMask was SystemZ::CCMASK_CMP_EQ.
-    if (/*!combineCCMask(CCRegOp, CCValidValOp, CCMaskValOp) &&*/
-        !isSameCCIPMOp(CCReg, CCRegOp, CCValidValOp))
-      return SDValue();
-    // If outer SELECT_CCMASK is CCMASK_CMP_EQ or single bit is set in
-    // CCMaskValOp(inner SELECT_CCMASK is CCMASK_CMP_EQ).
-    bool OnlyOneBitSet = CCMaskValOp && !(CCMaskValOp & (CCMaskValOp - 1));
-    // Original CCMask of current  SELECT_CCMASK is SystemZ::CCMASK_CMP_EQ,
-    // or Original CCMask of inner SELECT_CCMASK before actual CCMask
-    // computation is SystemZ::CCMASK_CMP_EQ.
-    IsUnionMask =
-        IsUnionMask || CCMaskValOp == SystemZ::CCMASK_CMP_EQ || OnlyOneBitSet;
-    if (InvertOp2)
-      CCMaskValOp ^= SystemZ::CCMASK_ANY;
-    if (IsUnionMask)
-      CCMask |= CCMaskValOp;
-    // Original outer SELECT_CCMASK has CCMask one of SystemZ::CCMASK_CMP_LT,
-    // SystemZ::CCMASK_CMP_LT,  SystemZ::CCMASK_CMP_NE,
-    // and inner CCMaskValOP is also not SystemZ::CCMASK_CMP_EQ,
-    // Taking intersection. In case of outer SystemZ::CCMASK_CMP_NE and inner
-    // as well, !(!a || !b) => (a & b).
-    else
-      CCMask &= CCMaskValOp;
-    auto Op0 = CCOpNode->getOperand(0);
-    auto Op1 = CCOpNode->getOperand(1);
-    // Inner select_cc True/False is inverted w.r.t outer. We are using inner
-    // select_cc to get CCRegOp and CCOpNode.
-    if (InvertOp2)
-      std::swap(Op0, Op1);
-    return DAG.getNode(
-        SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0), Op0, Op1,
-        DAG.getTargetConstant(CCValid, SDLoc(N), MVT::i32),
-        DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32), CCRegOp);
-  }
-  return SDValue();
+  // Outer select_cc is inverted with respect to nested select_cc.
+  if (Invert)
+    OuterCCMaskVal ^= SystemZ::CCMASK_ANY;
+
+  // Intersection of masks.
+  if (CCMask != SystemZ::CCMASK_CMP_EQ && !IsNestedCMP_EQ)
+    OuterCCMaskVal &= NestedCCMaskVal;
+  else // Union the masks.
+    OuterCCMaskVal |= NestedCCMaskVal;
+
+  SDValue Op0 = NestedCCOp->getOperand(0);
+  SDValue Op1 = NestedCCOp->getOperand(1);
+
+  return DAG.getNode(
+      SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0), Op0, Op1,
+      DAG.getTargetConstant(OuterCCValidVal, SDLoc(N), MVT::i32),
+      DAG.getTargetConstant(OuterCCMaskVal, SDLoc(N), MVT::i32), OuterCCReg);
 }
 
 // Merging versus split in multiple branches cost.
@@ -9359,8 +9116,290 @@ SystemZTargetLowering::getJumpConditionMergingParams(Instruction::BinaryOps Opc,
   return {-1, -1, -1};
 }
 
-SDValue SystemZTargetLowering::combineBR_CCMASK(
-    SDNode *N, DAGCombinerInfo &DCI) const {
+SDValue SystemZTargetLowering::combineTM(SDNode *N,
+                                         DAGCombinerInfo &DCI) const {
+  SelectionDAG &DAG = DCI.DAG;
+  auto *TMOp0Node = N->getOperand(0).getNode();
+  auto *TMOp1Const = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  auto *TMOp2Const = dyn_cast<ConstantSDNode>(N->getOperand(2));
+  if (!TMOp0Node || !TMOp1Const || !TMOp2Const ||
+      // The third operand of TM must be zero.
+      TMOp2Const->getZExtValue() != 0)
+    return SDValue();
+  auto TMOp1ConstVal = TMOp1Const->getZExtValue();
+  // Optimize (TM (IPM)).
+  if (TMOp0Node->getOpcode() == SystemZISD::IPM) {
+    int CCMask, CCValid;
+    if (TMOp1ConstVal == (1 << SystemZ::IPM_CC))
+      CCMask = SystemZ::CCMASK_CMP_GE;
+    else if (TMOp1ConstVal == (1 << (SystemZ::IPM_CC + 1)))
+      CCMask = SystemZ::CCMASK_CMP_LE;
+    else
+      return SDValue();
+    SDValue CCReg = TMOp0Node->getOperand(0);
+    CCValid = SystemZ::CCMASK_ANY;
+    // Return combined node.
+    return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
+                       N->getOperand(0), N->getOperand(1),
+                       DAG.getTargetConstant(CCValid, SDLoc(N), MVT::i32),
+                       DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32),
+                       CCReg);
+  }
+  // Optimize (TM (XOR (Op0 Op1))).
+  if (TMOp0Node->getOpcode() == ISD::XOR) {
+    auto *XorOp0 = TMOp0Node->getOperand(0).getNode();
+    // Op0. (SELECT_CCMASK (ICMP (SRL (IPM)))).
+    // Op1. (SRL (IPM (CC))).
+    if (XorOp0 && XorOp0->getOpcode() == SystemZISD::SELECT_CCMASK) {
+      auto *XorOp0CCValid = dyn_cast<ConstantSDNode>(XorOp0->getOperand(2));
+      auto *XorOp0CCMask = dyn_cast<ConstantSDNode>(XorOp0->getOperand(3));
+      if (!XorOp0CCValid || !XorOp0CCMask)
+        return SDValue();
+      SDValue XorOp0CCReg = XorOp0->getOperand(4);
+      int XorOp0CCMaskVal = XorOp0CCMask->getZExtValue();
+      int XorOp0CCValidVal = XorOp0CCValid->getZExtValue();
+      int CCMask = SystemZ::CCMASK_CMP_EQ, CCValid, TMCCMask;
+      SDValue CCReg = TMOp0Node->getOperand(1);
+      // (SELECT_CCMASK (ICMP (SRL (IPM)))).
+      if (!combineCCMask(XorOp0CCReg, XorOp0CCValidVal, XorOp0CCMaskVal) ||
+          // (SRL (IPM (CC))).
+          !combineCCMask(CCReg, CCValid, CCMask))
+        return SDValue();
+      auto *N0 = XorOp0CCReg.getNode(), *N1 = CCReg.getNode();
+      // Check if Op0 and Op1 point to the same CC.
+      if (!N0 || !N1 || N0 != N1)
+        return SDValue();
+      if (TMOp1ConstVal == 1)
+        TMCCMask = SystemZ::CCMASK_CMP_GE;
+      else
+        return SDValue();
+      // CCMask ^ XorOp0CCMaskVal = TMCCMask.
+      CCMask = XorOp0CCMaskVal ^ TMCCMask;
+      // Returned combined node with evaluated CCMask.
+      return DAG.getNode(
+          SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
+          XorOp0->getOperand(0), XorOp0->getOperand(1),
+          DAG.getTargetConstant(XorOp0CCValidVal, SDLoc(N), MVT::i32),
+          DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32), CCReg);
+    }
+  }
+  return SDValue();
+}
+
+SDValue SystemZTargetLowering::combineAND(SDNode *N,
+                                          DAGCombinerInfo &DCI) const {
+  SelectionDAG &DAG = DCI.DAG;
+  auto *AndOp0 = N->getOperand(0).getNode();
+  auto *AndOp1 = N->getOperand(1).getNode();
+  if (!AndOp0 || !AndOp1)
+    return SDValue();
+  // Both Operands of ISD::AND are SystemZISD::SELECT_CCMASK.
+  // And CCMask of both operands and check if they points to the
+  // same CC and update CCReg. Return combined SDNode.
+  if (AndOp0->getOpcode() == SystemZISD::SELECT_CCMASK &&
+      AndOp1->getOpcode() == SystemZISD::SELECT_CCMASK) {
+    auto *Op0CCValid = dyn_cast<ConstantSDNode>(AndOp0->getOperand(2));
+    auto *Op0CCMask = dyn_cast<ConstantSDNode>(AndOp0->getOperand(3));
+    auto *Op1CCValid = dyn_cast<ConstantSDNode>(AndOp1->getOperand(2));
+    auto *Op1CCMask = dyn_cast<ConstantSDNode>(AndOp1->getOperand(3));
+    if (!Op0CCValid || !Op1CCValid || !Op0CCMask || !Op1CCMask)
+      return SDValue();
+    int Op0CCValidVal = Op0CCValid->getZExtValue();
+    int Op1CCValidVal = Op1CCValid->getZExtValue();
+    // Check if both AndOp0 and AndOp1 have already been combined.
+    if (Op0CCValidVal != SystemZ::CCMASK_ANY ||
+        Op1CCValidVal != SystemZ::CCMASK_ANY)
+      return SDValue();
+    SDValue Op0CCReg = AndOp0->getOperand(4), Op1CCReg = AndOp1->getOperand(4);
+    auto *CCNode0 = Op0CCReg.getNode(), *CCNode1 = Op1CCReg.getNode();
+    // Check if AndOp0 and AndOp1 refer to the same CC.
+    if (!CCNode0 || !CCNode1 || CCNode0 != CCNode1)
+      return SDValue();
+    int Op0CCMaskVal = Op0CCMask->getZExtValue();
+    int Op1CCMaskVal = Op1CCMask->getZExtValue();
+    int CCMask = Op0CCMaskVal & Op1CCMaskVal;
+    return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
+                       AndOp0->getOperand(0), AndOp0->getOperand(1),
+                       DAG.getTargetConstant(Op0CCValidVal, SDLoc(N), MVT::i32),
+                       DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32),
+                       Op0CCReg);
+  } else if (AndOp0->getOpcode() == SystemZISD::SELECT_CCMASK) {
+    // Check AndOp1 for the (SRL (IPM (CC))) pattern. Second operand is CC.
+    SDValue CCReg = N->getOperand(1);
+    int CCMask = SystemZ::CCMASK_CMP_EQ, CCValid;
+    if (!combineCCMask(CCReg, CCValid, CCMask))
+      return SDValue();
+    SDValue Op0CCReg = AndOp0->getOperand(4);
+    auto *CCNode0 = Op0CCReg.getNode(), *CCNode1 = CCReg.getNode();
+    // Check if AndOp0 and AndOp1 refer to the same CC.
+    if (!CCNode0 || !CCNode1 || CCNode0 != CCNode1)
+      return SDValue();
+    auto *Op0CCValid = dyn_cast<ConstantSDNode>(AndOp0->getOperand(2));
+    auto *Op0CCMask = dyn_cast<ConstantSDNode>(AndOp0->getOperand(3));
+    int Op0CCValidVal = Op0CCValid->getZExtValue();
+    if (!Op0CCMask || !Op0CCValid || Op0CCValidVal != SystemZ::CCMASK_ANY)
+      return SDValue();
+    int Op0CCMaskVal = Op0CCMask->getZExtValue();
+    // Op0CCMaskVal & CCMask = 0. Invert Op0CCMaskVal.
+    int CCMaskVal = Op0CCMaskVal ^ 0xf;
+    assert(CCMaskVal < 4 && "CC out of range");
+    CCMask = 1 << (3 - CCMaskVal);
+    return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
+                       AndOp0->getOperand(0), AndOp0->getOperand(1),
+                       DAG.getTargetConstant(Op0CCValidVal, SDLoc(N), MVT::i32),
+                       DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32),
+                       CCReg);
+  }
+  return SDValue();
+}
+
+SDValue SystemZTargetLowering::combineOR(SDNode *N,
+                                         DAGCombinerInfo &DCI) const {
+  SelectionDAG &DAG = DCI.DAG;
+  auto *OrOp0 = N->getOperand(0).getNode();
+  auto *OrOp1 = N->getOperand(1).getNode();
+  if (!OrOp0 || !OrOp1)
+    return SDValue();
+  // Both Operands of ISD::OR are SystemZISD::SELECT_CCMASK.
+  // And CCMask of both operands and check if they points to the
+  // same CC and update CCReg. Return combined SDNode.
+  if (OrOp0->getOpcode() == SystemZISD::SELECT_CCMASK &&
+      OrOp1->getOpcode() == SystemZISD::SELECT_CCMASK) {
+    auto *Op0CCValid = dyn_cast<ConstantSDNode>(OrOp0->getOperand(2));
+    auto *Op0CCMask = dyn_cast<ConstantSDNode>(OrOp0->getOperand(3));
+    auto *Op1CCValid = dyn_cast<ConstantSDNode>(OrOp1->getOperand(2));
+    auto *Op1CCMask = dyn_cast<ConstantSDNode>(OrOp1->getOperand(3));
+    if (!Op0CCValid || !Op1CCValid || !Op0CCMask || !Op1CCMask)
+      return SDValue();
+    int Op0CCValidVal = Op0CCValid->getZExtValue();
+    int Op1CCValidVal = Op1CCValid->getZExtValue();
+    // Check if both OrOp0 and OrOp1 have already been combined.
+    if (Op0CCValidVal != SystemZ::CCMASK_ANY ||
+        Op1CCValidVal != SystemZ::CCMASK_ANY)
+      return SDValue();
+    SDValue Op0CCReg = OrOp0->getOperand(4), Op1CCReg = OrOp1->getOperand(4);
+    auto *CCNode0 = Op0CCReg.getNode(), *CCNode1 = Op1CCReg.getNode();
+    // Check if OrOp0 and OrOp1 refer to the same CC.
+    if (!CCNode0 || !CCNode1 || CCNode0 != CCNode1)
+      return SDValue();
+    int Op0CCMaskVal = Op0CCMask->getZExtValue();
+    int Op1CCMaskVal = Op1CCMask->getZExtValue();
+    // Or the masks.
+    int CCMask = Op0CCMaskVal | Op1CCMaskVal;
+    return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
+                       OrOp0->getOperand(0), OrOp1->getOperand(1),
+                       DAG.getTargetConstant(Op0CCValidVal, SDLoc(N), MVT::i32),
+                       DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32),
+                       Op0CCReg);
+  }
+  return SDValue();
+}
+
+SDValue SystemZTargetLowering::combineICMP(SDNode *N,
+                                           DAGCombinerInfo &DCI) const {
+  SelectionDAG &DAG = DCI.DAG;
+  auto *LHS = N->getOperand(0).getNode();
+  if (!LHS)
+    return SDValue();
+  auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  // Handle the case where RHS is const.
+  // Remove the redundant ICMP left after the operand 0 pattern has been
+  // combined by combineAND, combineXOR, or combineOR into a select_cc,
+  // on the assumption that CmpVal is zero.
+  if (RHS && LHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
+    auto *CCValid = dyn_cast<ConstantSDNode>(LHS->getOperand(2));
+    int CmpVal = RHS->getZExtValue();
+    // Check CmpVal is zero.
+    if (!CCValid || CCValid->getZExtValue() != SystemZ::CCMASK_ANY ||
+        CmpVal != 0)
+      return SDValue();
+    // Return operand 0 - Combined select_cc.
+    return N->getOperand(0);
+  }
+  if (RHS)
+    return SDValue();
+  // Handle the case where RHS is also an expression.
+  // Optimize (ICMP (SELECT_CCMASK AND (SRL (IPM (CC))))).
+  // ICMP: Op0 - SELECT_CCMASK and Op1 - (AND (SRL (IPM (CC)))).
+  if (LHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
+    auto *SelectCCValid = dyn_cast<ConstantSDNode>(LHS->getOperand(2));
+    auto *SelectCCMask = dyn_cast<ConstantSDNode>(LHS->getOperand(3));
+    // Check if select_cc has already been combined.
+    if (!SelectCCValid || !SelectCCMask ||
+        SelectCCValid->getZExtValue() != SystemZ::CCMASK_ANY)
+      return SDValue();
+    auto *RHS = N->getOperand(1).getNode();
+    if (RHS && RHS->getOpcode() == ISD::AND) {
+      SDValue AndCCReg = RHS->getOperand(0);
+      auto *AndConst = dyn_cast<ConstantSDNode>(RHS->getOperand(1));
+      if (!AndConst || AndConst->getZExtValue() != 1)
+        return SDValue();
+      int AndCCMask = SystemZ::CCMASK_CMP_EQ, AndCCValid;
+      // (AND (SRL (IPM (CC)))). Anding bit 28.
+      if (!combineCCMask(AndCCReg, AndCCValid, AndCCMask))
+        return SDValue();
+      // Anding bit 28 false => CC == 0 || CC == 2.
+      AndCCMask = SystemZ::CCMASK_CMP_GE;
+      auto *N1 = AndCCReg.getNode(), *N2 = LHS->getOperand(4).getNode();
+      if (!N1 || !N2 || N1 != N2)
+        return SDValue();
+      // CCMask for comparing AndCCMask and SelectCCMask.
+      int CCMask = AndCCMask ^ SelectCCMask->getZExtValue();
+      AndCCValid = SystemZ::CCMASK_ANY;
+      return DAG.getNode(
+          SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
+          LHS->getOperand(0), LHS->getOperand(1),
+          DAG.getTargetConstant(AndCCValid, SDLoc(N), MVT::i32),
+          DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32), AndCCReg);
+    }
+  }
+  return SDValue();
+}
+
+SDValue SystemZTargetLowering::combineXOR(SDNode *N,
+                                          DAGCombinerInfo &DCI) const {
+  SelectionDAG &DAG = DCI.DAG;
+  auto *XorOp0 = N->getOperand(0).getNode();
+  auto *XorOp1 = N->getOperand(1).getNode();
+  if (!XorOp0 || !XorOp1)
+    return SDValue();
+  // Both Operands of ISD::XOR are SystemZISD::SELECT_CCMASK.
+  // And CCMask of both operands and check if they points to the
+  // same CC and update CCReg. Return combined SDNode.
+  if (XorOp0->getOpcode() == SystemZISD::SELECT_CCMASK &&
+      XorOp1->getOpcode() == SystemZISD::SELECT_CCMASK) {
+    auto *Op0CCValid = dyn_cast<ConstantSDNode>(XorOp0->getOperand(2));
+    auto *Op0CCMask = dyn_cast<ConstantSDNode>(XorOp0->getOperand(3));
+    auto *Op1CCValid = dyn_cast<ConstantSDNode>(XorOp1->getOperand(2));
+    auto *Op1CCMask = dyn_cast<ConstantSDNode>(XorOp1->getOperand(3));
+    if (!Op0CCValid || !Op1CCValid || !Op0CCMask || !Op1CCMask)
+      return SDValue();
+    int Op0CCValidVal = Op0CCValid->getZExtValue();
+    int Op1CCValidVal = Op1CCValid->getZExtValue();
+    // Check if both XorOp0 and XorOp1 have already been combined.
+    if (Op0CCValidVal != SystemZ::CCMASK_ANY ||
+        Op1CCValidVal != SystemZ::CCMASK_ANY)
+      return SDValue();
+    SDValue Op0CCReg = XorOp0->getOperand(4), Op1CCReg = XorOp1->getOperand(4);
+    auto *CCNode0 = Op0CCReg.getNode(), *CCNode1 = Op1CCReg.getNode();
+    // Same CC node.
+    if (!CCNode0 || !CCNode1 || CCNode0 != CCNode1)
+      return SDValue();
+    int Op0CCMaskVal = Op0CCMask->getZExtValue();
+    int Op1CCMaskVal = Op1CCMask->getZExtValue();
+    // Xor the masks.
+    int CCMask = Op0CCMaskVal ^ Op1CCMaskVal;
+    return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
+                       XorOp0->getOperand(0), XorOp1->getOperand(1),
+                       DAG.getTargetConstant(Op0CCValidVal, SDLoc(N), MVT::i32),
+                       DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32),
+                       Op0CCReg);
+  }
+  return SDValue();
+}
+
+SDValue SystemZTargetLowering::combineBR_CCMASK(SDNode *N,
+                                                DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
 
   // Combine BR_CCMASK (ICMP (SELECT_CCMASK)) into a single BR_CCMASK.
@@ -9385,22 +9424,13 @@ SDValue SystemZTargetLowering::combineBR_CCMASK(
 
 SDValue SystemZTargetLowering::combineSELECT_CCMASK(
     SDNode *N, DAGCombinerInfo &DCI) const {
-  // Try to combine two select_cc if the following two conditions are met.
-  // 1. The subtree select_cc has already been combined/evaluated for the
-  // flag output operand srl/ipm sequence.
-  // 2. One of the True/False operands of the select_cc is not constant.
-  // If so, take an early exit, returning the combined select_cc with the
-  // computed CCMask.
-  // Why not apply the same logic in combineBR_CCMASK above? The call to
-  // combineSELECT_CC_CCIPMMask could be added there as well, but it would
-  // never be used, since that path calls combineSELECT_CCMASK to combine
-  // the two select_cc nodes.
+  SelectionDAG &DAG = DCI.DAG;
+  // Handle the case where select_cc_outer has a TrueVal or FalseVal that is
+  // an already combined select_cc_nested.
   SDValue Res = combineSELECT_CC_CCIPMMask(N, DCI);
   if (Res != SDValue())
     return Res;
 
-  SelectionDAG &DAG = DCI.DAG;
-
   // Combine SELECT_CCMASK (ICMP (SELECT_CCMASK)) into a single SELECT_CCMASK.
   auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
   auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
@@ -9410,7 +9440,6 @@ SDValue SystemZTargetLowering::combineSELECT_CCMASK(
   int CCValidVal = CCValid->getZExtValue();
   int CCMaskVal = CCMask->getZExtValue();
   SDValue CCReg = N->getOperand(4);
-
   if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
     return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
                        N->getOperand(0), N->getOperand(1),
@@ -9420,7 +9449,6 @@ SDValue SystemZTargetLowering::combineSELECT_CCMASK(
   return SDValue();
 }
 
-
 SDValue SystemZTargetLowering::combineGET_CCMASK(
     SDNode *N, DAGCombinerInfo &DCI) const {
 
@@ -9743,6 +9771,16 @@ SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::UREM:               return combineIntDIVREM(N, DCI);
   case ISD::INTRINSIC_W_CHAIN:
   case ISD::INTRINSIC_VOID:     return combineINTRINSIC(N, DCI);
+  case ISD::AND:
+    return combineAND(N, DCI);
+  case SystemZISD::ICMP:
+    return combineICMP(N, DCI);
+  case ISD::OR:
+    return combineOR(N, DCI);
+  case SystemZISD::TM:
+    return combineTM(N, DCI);
+  case ISD::XOR:
+    return combineXOR(N, DCI);
   }
 
   return SDValue();
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index b9896d2131dfb..424dd530a0b67 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -799,6 +799,11 @@ class SystemZTargetLowering : public TargetLowering {
   SDValue combineMUL(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineIntDIVREM(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineINTRINSIC(SDNode *N, DAGCombinerInfo &DCI) const;
+  SDValue combineAND(SDNode *N, DAGCombinerInfo &DCI) const;
+  SDValue combineICMP(SDNode *N, DAGCombinerInfo &DCI) const;
+  SDValue combineOR(SDNode *N, DAGCombinerInfo &DCI) const;
+  SDValue combineTM(SDNode *N, DAGCombinerInfo &DCI) const;
+  SDValue combineXOR(SDNode *N, DAGCombinerInfo &DCI) const;
 
   SDValue unwrapAddress(SDValue N) const override;
 
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_eq_noteq.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_eq_noteq.ll
index cce7e1150aa95..6444cd91f76d4 100644
--- a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_eq_noteq.ll
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_eq_noteq.ll
@@ -777,19 +777,10 @@ define signext range(i32 0, 43) i32 @bar1_023_OR_AND(i32 noundef signext %x) {
 ; CHECK-NEXT:    ahi %r2, 42
 ; CHECK-EMPTY:
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    lhi %r0, 0
-; CHECK-NEXT:    jno .LBB34_3
-; CHECK-NEXT:  # %bb.1: # %entry
-; CHECK-NEXT:    jnhe .LBB34_4
-; CHECK-NEXT:  .LBB34_2: # %entry
-; CHECK-NEXT:    llgfr %r2, %r0
-; CHECK-NEXT:    br %r14
-; CHECK-NEXT:  .LBB34_3: # %entry
-; CHECK-NEXT:    lhi %r0, 42
-; CHECK-NEXT:    jhe .LBB34_2
-; CHECK-NEXT:  .LBB34_4: # %entry
-; CHECK-NEXT:    lhi %r0, 0
-; CHECK-NEXT:    llgfr %r2, %r0
+; CHECK-NEXT:    lghi %r2, 42
+; CHECK-NEXT:    bher %r14
+; CHECK-NEXT:  .LBB34_1: # %entry
+; CHECK-NEXT:    lghi %r2, 0
 ; CHECK-NEXT:    br %r14
 entry:
   %0 = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
@@ -798,10 +789,8 @@ entry:
   tail call void @llvm.assume(i1 %1)
   %2 = and i32 %asmresult1, 1
   %or.cond = icmp eq i32 %2, 0
-  %cmp3.not = icmp eq i32 %asmresult1, 3
-  %3 = select i1 %cmp3.not, i32 0, i32 42
-  %cond = select i1 %or.cond, i32 %3, i32 0
-  ret i32 %cond
+  %spec.select = select i1 %or.cond, i32 42, i32 0
+  ret i32 %spec.select
 }
 
 ; Test (((cc == 1) || (cc == 2)) && (cc != 3))
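
As background for CC-mask patterns like the one noted above,
(((cc == 1) || (cc == 2)) && (cc != 3)), and for the combineAND/combineOR/
combineXOR changes in this patch, here is a small standalone sketch of the
CC-mask algebra they rely on. The mask constants are written to mirror the
SystemZ::CCMASK_* convention (bit 3 for CC 0 down to bit 0 for CC 3); the
helper name and the program itself are illustrative, not code from the
patch. A predicate over the condition code is a 4-bit mask, so && becomes a
bitwise AND of masks, || becomes a bitwise OR, and negation becomes an XOR
against CCMASK_ANY:

  #include <cassert>

  // Constants mirroring the SystemZ 4-bit CC masks used by SELECT_CCMASK /
  // BR_CCMASK: bit 3 corresponds to CC 0, ..., bit 0 to CC 3.
  enum : int {
    CCMASK_0 = 1 << 3,
    CCMASK_1 = 1 << 2,
    CCMASK_2 = 1 << 1,
    CCMASK_3 = 1 << 0,
    CCMASK_ANY = CCMASK_0 | CCMASK_1 | CCMASK_2 | CCMASK_3
  };

  // Mask for the predicate "cc == Val".
  static int maskForCCEqual(int Val) { return 1 << (3 - Val); }

  int main() {
    // (cc == 1) || (cc == 2): union of the two single-bit masks.
    int OneOrTwo = maskForCCEqual(1) | maskForCCEqual(2);
    // cc != 3: invert a single-bit mask against CCMASK_ANY.
    int NotThree = maskForCCEqual(3) ^ CCMASK_ANY;
    // ((cc == 1) || (cc == 2)) && (cc != 3): intersection of the masks.
    int Combined = OneOrTwo & NotThree;
    assert(Combined == (CCMASK_1 | CCMASK_2));
    return 0;
  }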

>From 0dc69956fc7ce44c52c846d27af0b33ad4ec4349 Mon Sep 17 00:00:00 2001
From: anoopkg6 <anoopkg6 at github.com>
Date: Wed, 25 Jun 2025 03:14:27 +0200
Subject: [PATCH 10/12] Incorporated code review feedback. 1. Removed check for
 CCValid if it has already been combined. 2. Added logic to check whether
 TrueVal/FalseVal of two select_ccmask are matching or inverted.

---
 .../Target/SystemZ/SystemZISelLowering.cpp    | 119 ++++++++++++------
 1 file changed, 81 insertions(+), 38 deletions(-)

diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 52eea424dd210..b8fdb7d96b44d 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -8799,9 +8799,7 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
       return false;
     auto *CCNValid = dyn_cast<ConstantSDNode>(CCNode->getOperand(2));
     auto *CCNMask = dyn_cast<ConstantSDNode>(CCNode->getOperand(3));
-    // Check if select_cc has already been combined.
-    if (!CCNValid || !CCNMask ||
-        CCNValid->getZExtValue() != SystemZ::CCMASK_ANY)
+    if (!CCNValid || !CCNMask)
       return false;
     CCValid = SystemZ::CCMASK_ANY;
     CCMask = CCNMask->getZExtValue();
@@ -8970,9 +8968,6 @@ SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
   // Check if CCOp1 and CCOp2 refers to the same CC condition node.
   const auto isSameCCIPMOp = [](SDValue &CCOp1, SDValue &CCOp2,
                                 int &CCValidVal) {
-    // Already combined sequence.
-    if (CCValidVal != SystemZ::CCMASK_ANY)
-      return false;
     SDNode *N1 = CCOp1.getNode(), *N2 = CCOp2.getNode();
     return N1 && N2 && N1 == N2;
   };
@@ -8988,7 +8983,6 @@ SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
 
   SDValue NestedCCOp = OuterTrueVal ? N->getOperand(1) : N->getOperand(0);
   auto *NestedCCNode = NestedCCOp.getNode();
-  // check if nested select_cc_b has already been combined.
   if (!NestedCCNode || NestedCCNode->getOpcode() != SystemZISD::SELECT_CCMASK)
     return SDValue();
 
@@ -9031,10 +9025,9 @@ SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
   // of select_cc_a points to select_cc_b. We return select_cc with TrueVal
   // and FalseVal from select_cc_b with combined CCMask.
   // One of OuterTrueVal or OuterFalseVal has select_cc_b.
+  // And both points to the same CC.
   if ((OuterTrueVal != nullptr) ^ (OuterFalseVal != nullptr)) {
-    // Both OuterCCValidVal and NestedCCValidVal have already been combined.
-    if (OuterCCValidVal == SystemZ::CCMASK_ANY &&
-        // And both points to the same CC.
+    if (OuterCCValidVal == NestedCCValidVal &&
         isSameCCIPMOp(OuterCCReg, NestedCCReg, NestedCCValidVal)) {
       CCMask |= NestedCCMaskVal;
       // NestedCCOp has both operands constants.
@@ -9052,10 +9045,7 @@ SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
   // CC as nested CC.
   // Outer select_cc has yet not been combined.
   if (OuterCCValidVal != SystemZ::CCMASK_ICMP ||
-      // Try combining outer select_cc.
       !combineCCMask(OuterCCReg, OuterCCValidVal, OuterCCMaskVal) ||
-      // Check nested select_cc has already been combined and points to
-      // same Condtiion code as outer select_cc.
       !isSameCCIPMOp(OuterCCReg, NestedCCReg, NestedCCValidVal))
     return SDValue();
   // Check if nested select_cc original CCMask was CCMASK_CMP_EQ.
@@ -9166,7 +9156,7 @@ SDValue SystemZTargetLowering::combineTM(SDNode *N,
           !combineCCMask(CCReg, CCValid, CCMask))
         return SDValue();
       auto *N0 = XorOp0CCReg.getNode(), *N1 = CCReg.getNode();
-      // Check if Op0 and Op1 point to the same CC.
+      // Op0 and Op1 should point to the same CC.
       if (!N0 || !N1 || N0 != N1)
         return SDValue();
       if (TMOp1ConstVal == 1)
@@ -9175,7 +9165,7 @@ SDValue SystemZTargetLowering::combineTM(SDNode *N,
         return SDValue();
       // CCMask ^ XorOp0CCMaskVal = TMCCMask..
       CCMask = XorOp0CCMaskVal ^ TMCCMask;
-      // Returned combined node with evaluated CCMask.
+      // Return combined node with combined CCMask.
       return DAG.getNode(
           SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
           XorOp0->getOperand(0), XorOp0->getOperand(1),
@@ -9194,7 +9184,7 @@ SDValue SystemZTargetLowering::combineAND(SDNode *N,
   if (!AndOp0 || !AndOp1)
     return SDValue();
   // Both Operands of ISD::AND are SystemZISD::SELECT_CCMASK.
-  // And CCMask of both operands and check if they points to the
+  // And CCMask of both operands and they should point to the
   // same CC and update CCReg. Return combined SDNode.
   if (AndOp0->getOpcode() == SystemZISD::SELECT_CCMASK &&
       AndOp1->getOpcode() == SystemZISD::SELECT_CCMASK) {
@@ -9206,38 +9196,52 @@ SDValue SystemZTargetLowering::combineAND(SDNode *N,
       return SDValue();
     int Op0CCValidVal = Op0CCValid->getZExtValue();
     int Op1CCValidVal = Op1CCValid->getZExtValue();
-    // Check if both AndOp0 and AndOp1 have aleady been combined.
-    if (Op0CCValidVal != SystemZ::CCMASK_ANY ||
-        Op1CCValidVal != SystemZ::CCMASK_ANY)
+    if (Op0CCValidVal != Op1CCValidVal)
       return SDValue();
     SDValue Op0CCReg = AndOp0->getOperand(4), Op1CCReg = AndOp1->getOperand(4);
     auto *CCNode0 = Op0CCReg.getNode(), *CCNode1 = Op1CCReg.getNode();
-    // Check if AndOp0 and AndOp1 refers to same CC.
+    // AndOp0 and AndOp1 should refer to same CC.
     if (!CCNode0 || !CCNode1 || CCNode0 != CCNode1)
       return SDValue();
+    // Verifying True/False of AndOp0 and AndOp1 matching or inverted.
+    bool Invert = false;
+    auto *Op0TrueVal = dyn_cast<ConstantSDNode>(AndOp0->getOperand(0));
+    auto *Op0FalseVal = dyn_cast<ConstantSDNode>(AndOp0->getOperand(1));
+    auto *Op1TrueVal = dyn_cast<ConstantSDNode>(AndOp1->getOperand(0));
+    auto *Op1FalseVal = dyn_cast<ConstantSDNode>(AndOp1->getOperand(1));
+    if (!Op0TrueVal || !Op0FalseVal || !Op1TrueVal || !Op1FalseVal)
+      return SDValue();
+    if (Op0TrueVal->getZExtValue() == Op1FalseVal->getZExtValue() ||
+        Op1TrueVal->getZExtValue() == Op0FalseVal->getZExtValue())
+      Invert = !Invert;
+    else if (Op0TrueVal->getZExtValue() != Op1TrueVal->getZExtValue() ||
+             Op0FalseVal->getZExtValue() != Op1FalseVal->getZExtValue())
+      return SDValue();
     int Op0CCMaskVal = Op0CCMask->getZExtValue();
     int Op1CCMaskVal = Op1CCMask->getZExtValue();
     int CCMask = Op0CCMaskVal & Op1CCMaskVal;
+    if (Invert)
+      CCMask ^= SystemZ::CCMASK_ANY;
     return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
                        AndOp0->getOperand(0), AndOp0->getOperand(1),
                        DAG.getTargetConstant(Op0CCValidVal, SDLoc(N), MVT::i32),
                        DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32),
                        Op0CCReg);
   } else if (AndOp0->getOpcode() == SystemZISD::SELECT_CCMASK) {
-    // check AndOp1 for (SRL (IPM (CC))) pattern. Second operand is CC.
+    // AndOp1 is (SRL (IPM (CC))) pattern. Second operand is CC.
     SDValue CCReg = N->getOperand(1);
     int CCMask = SystemZ::CCMASK_CMP_EQ, CCValid;
     if (!combineCCMask(CCReg, CCValid, CCMask))
       return SDValue();
     SDValue Op0CCReg = AndOp0->getOperand(4);
     auto *CCNode0 = Op0CCReg.getNode(), *CCNode1 = CCReg.getNode();
-    // Check if AndOp0 and AndOp1 refers to same CC.
+    // AndOp0 and AndOp1 should refer to same CC.
     if (!CCNode0 || !CCNode1 || CCNode0 != CCNode1)
       return SDValue();
     auto *Op0CCValid = dyn_cast<ConstantSDNode>(AndOp0->getOperand(2));
     auto *Op0CCMask = dyn_cast<ConstantSDNode>(AndOp0->getOperand(3));
     int Op0CCValidVal = Op0CCValid->getZExtValue();
-    if (!Op0CCMask || !Op0CCValid || Op0CCValidVal != SystemZ::CCMASK_ANY)
+    if (!Op0CCMask || !Op0CCValid)
       return SDValue();
     int Op0CCMaskVal = Op0CCMask->getZExtValue();
     // Op0CCMaskVal & CCMask = 0. Invert Op0CCMaskVal.
@@ -9261,7 +9265,7 @@ SDValue SystemZTargetLowering::combineOR(SDNode *N,
   if (!OrOp0 || !OrOp1)
     return SDValue();
   // Both Operands of ISD::OR are SystemZISD::SELECT_CCMASK.
-  // And CCMask of both operands and check if they points to the
+  // And CCMask of both operands and they should point to the
   // same CC and update CCReg. Return combined SDNode.
   if (OrOp0->getOpcode() == SystemZISD::SELECT_CCMASK &&
       OrOp1->getOpcode() == SystemZISD::SELECT_CCMASK) {
@@ -9273,19 +9277,33 @@ SDValue SystemZTargetLowering::combineOR(SDNode *N,
       return SDValue();
     int Op0CCValidVal = Op0CCValid->getZExtValue();
     int Op1CCValidVal = Op1CCValid->getZExtValue();
-    // Check if both OrOp0 and OrOp1 have aleady been combined.
-    if (Op0CCValidVal != SystemZ::CCMASK_ANY ||
-        Op1CCValidVal != SystemZ::CCMASK_ANY)
+    if (Op0CCValidVal != Op1CCValidVal)
       return SDValue();
     SDValue Op0CCReg = OrOp0->getOperand(4), Op1CCReg = OrOp1->getOperand(4);
     auto *CCNode0 = Op0CCReg.getNode(), *CCNode1 = Op1CCReg.getNode();
-    // Check if OrOp0 and OrOp0 refers to same CC.
+    // OrOp0 and OrOp0 should point to same CC.
     if (!CCNode0 || !CCNode1 || CCNode0 != CCNode1)
       return SDValue();
+    // Checking True/False of OrOp0 and OrOp1 matching or inverted.
+    bool Invert = false;
+    auto *Op0TrueVal = dyn_cast<ConstantSDNode>(OrOp0->getOperand(0));
+    auto *Op0FalseVal = dyn_cast<ConstantSDNode>(OrOp0->getOperand(1));
+    auto *Op1TrueVal = dyn_cast<ConstantSDNode>(OrOp1->getOperand(0));
+    auto *Op1FalseVal = dyn_cast<ConstantSDNode>(OrOp1->getOperand(1));
+    if (!Op0TrueVal || !Op0FalseVal || !Op1TrueVal || !Op1FalseVal)
+      return SDValue();
+    if (Op0TrueVal->getZExtValue() == Op1FalseVal->getZExtValue() ||
+        Op1TrueVal->getZExtValue() == Op0FalseVal->getZExtValue())
+      Invert = !Invert;
+    else if (Op0TrueVal->getZExtValue() != Op1TrueVal->getZExtValue() ||
+             Op0FalseVal->getZExtValue() != Op1FalseVal->getZExtValue())
+      return SDValue();
     int Op0CCMaskVal = Op0CCMask->getZExtValue();
     int Op1CCMaskVal = Op1CCMask->getZExtValue();
     // Oring Masks.
     int CCMask = Op0CCMaskVal | Op1CCMaskVal;
+    if (Invert)
+      CCMask ^= SystemZ::CCMASK_ANY;
     return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
                        OrOp0->getOperand(0), OrOp1->getOperand(1),
                        DAG.getTargetConstant(Op0CCValidVal, SDLoc(N), MVT::i32),
@@ -9306,14 +9324,24 @@ SDValue SystemZTargetLowering::combineICMP(SDNode *N,
   // Remove redundant ICMP left after operand 0 pattern has been combined by
   // by combineAND, combineXOR, combineOR into select_cc on the assumption
   // CmpVal is zero.
+  // Why is redundant ICMP created?
+  // This redundant 'icmp' is created as we are combining '(icmp (and...))',
+  // 'icmp (or...))', or 'icmp (xor...))' in combineAND, combineOR, or
+  // combineXOR at 'and', 'or', 'xor' level and select_ccmask is returned.
+  // e.g t10 is returned by combineOR/XOR/AND.
+  // t10: (select_ccmask VAL1 VAL2 CCMASK_ANY CCOp)
+  // (icmp t10 0)
+  // Following code return t10, entire select_ccmask.
+  // This dangling icmp can be avoided if we combine entire sequence at 'icmp'
+  // level. But downside of this approach is most of this code will be pulled
+  // into combineICMP and might leave some of the added pattern to
+  // PerformDAGCombine useless.
   if (RHS && LHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
     auto *CCValid = dyn_cast<ConstantSDNode>(LHS->getOperand(2));
     int CmpVal = RHS->getZExtValue();
     // Check CmpVal is zero.
-    if (!CCValid || CCValid->getZExtValue() != SystemZ::CCMASK_ANY ||
-        CmpVal != 0)
+    if (!CCValid || CmpVal != 0)
       return SDValue();
-    // Return operand 0 - Combined select_cc.
     return N->getOperand(0);
   }
   if (RHS)
@@ -9324,9 +9352,7 @@ SDValue SystemZTargetLowering::combineICMP(SDNode *N,
   if (LHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
     auto *SelectCCValid = dyn_cast<ConstantSDNode>(LHS->getOperand(2));
     auto *SelectCCMask = dyn_cast<ConstantSDNode>(LHS->getOperand(3));
-    // Check if select_cc has already been combined.
-    if (!SelectCCValid || !SelectCCMask ||
-        SelectCCValid->getZExtValue() != SystemZ::CCMASK_ANY)
+    if (!SelectCCValid || !SelectCCMask)
       return SDValue();
     auto *RHS = N->getOperand(1).getNode();
     if (RHS && RHS->getOpcode() == ISD::AND) {
@@ -9364,7 +9390,7 @@ SDValue SystemZTargetLowering::combineXOR(SDNode *N,
   if (!XorOp0 || !XorOp1)
     return SDValue();
   // Both Operands of ISD::XOR are SystemZISD::SELECT_CCMASK.
-  // And CCMask of both operands and check if they points to the
+  // And CCMask of both operands and they should point to the
   // same CC and update CCReg. Return combined SDNode.
   if (XorOp0->getOpcode() == SystemZISD::SELECT_CCMASK &&
       XorOp1->getOpcode() == SystemZISD::SELECT_CCMASK) {
@@ -9376,19 +9402,36 @@ SDValue SystemZTargetLowering::combineXOR(SDNode *N,
       return SDValue();
     int Op0CCValidVal = Op0CCValid->getZExtValue();
     int Op1CCValidVal = Op1CCValid->getZExtValue();
-    // Check if both XorOp0 and XorOp1 have aleady been combined.
-    if (Op0CCValidVal != SystemZ::CCMASK_ANY ||
-        Op1CCValidVal != SystemZ::CCMASK_ANY)
+    if (Op0CCValidVal != Op1CCValidVal)
       return SDValue();
     SDValue Op0CCReg = XorOp0->getOperand(4), Op1CCReg = XorOp1->getOperand(4);
     auto *CCNode0 = Op0CCReg.getNode(), *CCNode1 = Op1CCReg.getNode();
     // Same CC node.
     if (!CCNode0 || !CCNode1 || CCNode0 != CCNode1)
       return SDValue();
+    // XorOp0 and XorOp0 should point to same CC.
+    if (!CCNode0 || !CCNode1 || CCNode0 != CCNode1)
+      return SDValue();
+    // Checking True/False of XorOp0 and XorOp1 matching or inverted.
+    bool Invert = false;
+    auto *Op0TrueVal = dyn_cast<ConstantSDNode>(XorOp0->getOperand(0));
+    auto *Op0FalseVal = dyn_cast<ConstantSDNode>(XorOp0->getOperand(1));
+    auto *Op1TrueVal = dyn_cast<ConstantSDNode>(XorOp1->getOperand(0));
+    auto *Op1FalseVal = dyn_cast<ConstantSDNode>(XorOp1->getOperand(1));
+    if (!Op0TrueVal || !Op0FalseVal || !Op1TrueVal || !Op1FalseVal)
+      return SDValue();
+    if (Op0TrueVal->getZExtValue() == Op1FalseVal->getZExtValue() ||
+        Op1TrueVal->getZExtValue() == Op0FalseVal->getZExtValue())
+      Invert = !Invert;
+    else if (Op0TrueVal->getZExtValue() != Op1TrueVal->getZExtValue() ||
+             Op0FalseVal->getZExtValue() != Op1FalseVal->getZExtValue())
+      return SDValue();
     int Op0CCMaskVal = Op0CCMask->getZExtValue();
     int Op1CCMaskVal = Op1CCMask->getZExtValue();
     // Xor the masks.
     int CCMask = Op0CCMaskVal ^ Op1CCMaskVal;
+    if (Invert)
+      CCMask ^= SystemZ::CCMASK_ANY;
     return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
                        XorOp0->getOperand(0), XorOp1->getOperand(1),
                        DAG.getTargetConstant(Op0CCValidVal, SDLoc(N), MVT::i32),

>From e2467a6b78f0eda4d9af6c7728665932231b1903 Mon Sep 17 00:00:00 2001
From: anoopkg6 <anoopkg6 at github.com>
Date: Mon, 30 Jun 2025 02:30:11 +0200
Subject: [PATCH 11/12] Incorporated code review feedback. 1. Put outer
 select_ccmask and br_ccmask back on the worklist to allow processing from the
 outer select_ccmask/br_ccmask to combine with (icmp (select_ccmask)).
 2. Remove combineICMP.

---
 .../Target/SystemZ/SystemZISelLowering.cpp    | 370 +++++++++---------
 llvm/lib/Target/SystemZ/SystemZISelLowering.h |   1 -
 .../SystemZ/flag_output_operand_ccmixed.ll    |  18 +-
 .../flag_output_operand_ccmixed_eq_noteq.ll   |   7 +-
 4 files changed, 201 insertions(+), 195 deletions(-)
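
A rough illustration of the DAG shape this worklist change targets. The C
snippet below is hypothetical (it assumes the new flag-output constraint is
spelled "=@cc" at the C level and yields the raw condition code 0..3, matching
the "={@cc}" output used in the IR tests); it is not one of the patch's test
cases. The '||' produces an OR of two SELECT_CCMASK nodes on the same CC;
combineOR folds them into a single SELECT_CCMASK, leaving an
(icmp (select_ccmask), 0) feeding the branch, and re-adding the branch to the
worklist lets BR_CCMASK absorb that comparison directly.

    extern void dummy(void);

    void branch_on_cc(int x) {
      int cc;
      /* CC is set by the inline asm and captured via the flag output. */
      asm("ahi %0,42" : "+d"(x), "=@cc"(cc));
      if (cc == 0 || cc == 3) /* becomes one SELECT_CCMASK after combineOR */
        dummy();              /* BR_CCMASK is put back on the worklist     */
    }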

diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index b8fdb7d96b44d..5728ee45b3f29 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -8729,14 +8729,13 @@ SDValue SystemZTargetLowering::combineSETCC(
   return SDValue();
 }
 
-// Combine IPM sequence for flag output operands.
 static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
   // CCMask for ICmp is equal to 0, 1, 2 or 3.
   const auto CCMaskForICmpEQCCVal = [](unsigned CC) {
     assert(CC < 4 && "CC out of range");
     return 1 << (3 - CC);
   };
-  // Convert CCVal to CCMask and update it.
+  // Convert CCVal to a CC mask based on the current CCMask, and update CCMask.
   const auto convertCCValToCCMask = [&](int CCVal) {
     bool Invert = false;
     if (CCMask == SystemZ::CCMASK_CMP_NE)
@@ -8779,32 +8778,29 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
   if (isSRL_IPM_CCSequence(CCNode))
     return true;
 
-  // This code is common to both combineBR_CCMASK and combineSELECT_CCMASK
-  // Two cases of combining already combined select_cc.
-  // a. Combining br_cc and already combined select_cc.
-  // b. Combining select_cc_a and already combines select_cc_b,
-  // select_cc_a must have both TrueVal and FalseVal Constant.
-  // We return Nested CCMask and updated link to CCReg.
-  // Cases for non-const TrueVal/False Value already have been handled
-  // in combineSELECT_CC_CCIPMMask.
+  // Combine CCMASK_TM with select_ccmask.
   if (CCNode->getOpcode() == SystemZISD::SELECT_CCMASK) {
     bool Invert = false;
-    // Outer select_cc is TM.
-    if (CCValid == SystemZ::CCMASK_TM) {
-      if (CCMask == SystemZ::CCMASK_TM_SOME_1)
-        Invert = !Invert;
-      else if (CCMask != SystemZ::CCMASK_TM_ALL_0)
-        return false;
-    } else if (CCValid != SystemZ::CCMASK_ICMP)
+
+    // Outer select_ccmask is TM.
+    if (CCValid != SystemZ::CCMASK_TM)
       return false;
+    if (CCMask == SystemZ::CCMASK_TM_SOME_1)
+      Invert = !Invert;
+    else if (CCMask != SystemZ::CCMASK_TM_ALL_0)
+      return false;
+
     auto *CCNValid = dyn_cast<ConstantSDNode>(CCNode->getOperand(2));
     auto *CCNMask = dyn_cast<ConstantSDNode>(CCNode->getOperand(3));
     if (!CCNValid || !CCNMask)
       return false;
+
+    // Update CCMask and CCValid.
     CCValid = SystemZ::CCMASK_ANY;
     CCMask = CCNMask->getZExtValue();
     if (Invert)
       CCMask ^= SystemZ::CCMASK_ANY;
+
     // Update CCReg link.
     CCReg = CCNode->getOperand(4);
     return true;
@@ -8827,7 +8823,7 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
   if (!CompareRHS)
     return false;
 
-  // Optimize the case where LHS is (ICMP (SRL (IPM))).
+  // Optimize (ICMP (SRL (IPM))).
   int CmpVal = CompareRHS->getZExtValue();
   if (isSRL_IPM_CCSequence(CompareLHS)) {
     if (convertCCValToCCMask(CmpVal)) {
@@ -8837,20 +8833,22 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     return false;
   }
 
-  // Optimize the case where LHS is (ICMP (OR (SRL (IPM (CC))))).
+  // Optimize (ICMP (OR (SRL (IPM (CC))))).
   // t24: i32 = or disjoint t21, Constant:i32<-4>
   // t40: i32 = SystemZISD::ICMP t24, Constant:i32<-2>, TargetConstant:i32<1>
   if (CompareLHS->getOpcode() == ISD::OR) {
     SDValue OrOp0 = CompareLHS->getOperand(0);
     SDValue OrOp1 = CompareLHS->getOperand(1);
+
     // Op0 is (SRL (IPM (CC)). Op1 is const.
     if (isSRL_IPM_CCSequence(OrOp0.getNode())) {
-      auto *OrConst = dyn_cast<ConstantSDNode>(OrOp1);
       // Op1 is Constant:i32<-4>.
+      auto *OrConst = dyn_cast<ConstantSDNode>(OrOp1);
       if (!OrConst || (OrConst->getZExtValue() & 0x3))
         return false;
-      // setullt unsigned(-2) or  setugt unsigned(-3).
-      // mask = 0x1100 => CC != 2 && CC != 3.
+
+      // Try combining and Compute effective CC mask.
+      // setullt -2 or inverted 'setugt -3' =>  CC != 2 && CC != 3.
       CmpVal &= 0x3;
       if (convertCCValToCCMask(CmpVal)) {
         CCValid = SystemZ::CCMASK_ANY;
@@ -8859,13 +8857,12 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
     }
   }
 
-  // Optimize the case where LHS is (ICMP (ADD (SRL (IPM (CC))))).
+  // Optimize (ICMP (ADD (SRL (IPM (CC))))).
   if (CompareLHS->getOpcode() == ISD::ADD) {
     if (isSRL_IPM_CCSequence(CompareLHS->getOperand(0).getNode())) {
-      // (unsigned) CCVal - 1 or (unsigned) CCVal - 3 Inverted.
-      // CCVal <= 2 => CC == 1 || CC == 2.
-      // CCVal <= 3 => CC == 1 || CC == 2 || CC == 3.
-      // (CCVal - AddConst) < CmpVal.
+      // (unsigned)(CmpVal - 1) or (unsigned)(CmpVal - 3) Inverted.
+      // CmpVal <= 2 => CC == 1 || CC == 2.
+      // CmpVal <= 3 => CC == 1 || CC == 2 || CC == 3.
       auto *AddConstOp = dyn_cast<ConstantSDNode>((CompareLHS->getOperand(1)));
       int AddConst = AddConstOp->getZExtValue();
       if ((AddConst != -1) && (AddConst != -3))
@@ -8876,6 +8873,8 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
         AddConst = AddConst & 0x3;
       } else
         AddConst = ~AddConst + 1;
+
+      // Try combining and Compute effective CC mask.
       CmpVal &= 0x3;
       CmpVal += AddConst;
       if (convertCCValToCCMask(CmpVal)) {
@@ -8959,24 +8958,23 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
   return false;
 }
 
-// Combine (select_cc_a (select_cc_b)), where select_cc_a has one of TrueVal
-// or FalseVal has nested select_cc_b(already been combined sequence)
+// Combine (select_ccmask_a (select_ccmask_b)), where one of select_ccmask_a's
+// TrueVal or FalseVal is a nested select_ccmask_b.
 SDValue
 SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
-  // Check if CCOp1 and CCOp2 refers to the same CC condition node.
-  const auto isSameCCIPMOp = [](SDValue &CCOp1, SDValue &CCOp2,
-                                int &CCValidVal) {
+
+  // Verify CCOp1 and CCOp2 refer to the same CC condition node.
+  const auto isSameCCIPMOp = [](SDValue &CCOp1, SDValue &CCOp2) {
     SDNode *N1 = CCOp1.getNode(), *N2 = CCOp2.getNode();
     return N1 && N2 && N1 == N2;
   };
-
   auto *OuterTrueVal = dyn_cast<ConstantSDNode>(N->getOperand(0));
   auto *OuterFalseVal = dyn_cast<ConstantSDNode>(N->getOperand(1));
 
   // Already handled the case both operands constant in combineCCMask.
-  // Not yet encountered the case where both operands are sub-expressions,
+  // Not yet encountered the case where both operands are not constants,
   // that case can be handled by removing this condition.
   if (!((OuterTrueVal != nullptr) ^ (OuterFalseVal != nullptr)))
     return SDValue();
@@ -8991,18 +8989,18 @@ SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
   if (!NestedTrueVal || !NestedFalseVal)
     return SDValue();
   bool Invert = false;
-  // Check if outer select_cc_a and nested select_cc_b True/False matching
-  // or inverted.
+  // Check whether the outer and nested select_ccmask True/False values are
+  // swapped between the two.
   if (OuterTrueVal) {
-    // OuterFalseVal points to already combined nested select_cc_b.
+    // OuterFalseVal has nested select_ccmask.
     if (OuterTrueVal->getZExtValue() == NestedFalseVal->getZExtValue())
-      Invert = !Invert; // Inverted.
+      Invert = !Invert;
     else if (OuterTrueVal->getZExtValue() != NestedTrueVal->getZExtValue())
       return SDValue();
   } else if (OuterFalseVal) {
-    // OuterTrueVal points to already combined nested select_cc_b.
+    // OuterTrueVal has nested select_ccmask.
     if (OuterFalseVal->getZExtValue() == NestedTrueVal->getZExtValue())
-      Invert = !Invert; // Inverted.
+      Invert = !Invert;
     else if (OuterFalseVal->getZExtValue() != NestedFalseVal->getZExtValue())
       return SDValue();
   }
@@ -9015,57 +9013,44 @@ SystemZTargetLowering::combineSELECT_CC_CCIPMMask(SDNode *N,
 
   int OuterCCValidVal = OuterCCValid->getZExtValue();
   int OuterCCMaskVal = OuterCCMask->getZExtValue();
-  int NestedCCValidVal = NestedCCValid->getZExtValue();
   int NestedCCMaskVal = NestedCCMask->getZExtValue();
   int CCMask = OuterCCMaskVal;
   SDValue OuterCCReg = N->getOperand(4);
   SDValue NestedCCReg = NestedCCOp->getOperand(4);
 
-  // Combine two already combined (select_cc_a (select_cc_b)), where TrueVal
-  // of select_cc_a points to select_cc_b. We return select_cc with TrueVal
-  // and FalseVal from select_cc_b with combined CCMask.
-  // One of OuterTrueVal or OuterFalseVal has select_cc_b.
-  // And both points to the same CC.
-  if ((OuterTrueVal != nullptr) ^ (OuterFalseVal != nullptr)) {
-    if (OuterCCValidVal == NestedCCValidVal &&
-        isSameCCIPMOp(OuterCCReg, NestedCCReg, NestedCCValidVal)) {
-      CCMask |= NestedCCMaskVal;
-      // NestedCCOp has both operands constants.
-      auto Op0 = NestedCCOp->getOperand(0);
-      auto Op1 = NestedCCOp->getOperand(1);
-      // Return combined select_cc.
-      return DAG.getNode(
-          SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0), Op0, Op1,
-          DAG.getTargetConstant(OuterCCValidVal, SDLoc(N), MVT::i32),
-          DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32), NestedCCReg);
-    }
-  }
-  // Now handles the case where outer select_cc_a has not yet been combined.
-  // Combine outer select_cc and check if it corresponds to the same
-  // CC as nested CC.
-  // Outer select_cc has yet not been combined.
+  // Try combining the outer select_ccmask and verify that the updated
+  // OuterCCReg points to the same CC as NestedCCReg.
   if (OuterCCValidVal != SystemZ::CCMASK_ICMP ||
       !combineCCMask(OuterCCReg, OuterCCValidVal, OuterCCMaskVal) ||
-      !isSameCCIPMOp(OuterCCReg, NestedCCReg, NestedCCValidVal))
+      !isSameCCIPMOp(OuterCCReg, NestedCCReg))
     return SDValue();
-  // Check if nested select_cc original CCMask was CCMASK_CMP_EQ.
-  // Only one-bit is set in NestedCCMaskVal for CCMASK_CMP_EQ.
+
+  // Try to determine whether the nested select_ccmask's original CCMask was
+  // CCMASK_CMP_EQ; a single set bit in NestedCCMaskVal indicates CCMASK_CMP_EQ.
   bool IsNestedCMP_EQ =
       NestedCCMaskVal && !(NestedCCMaskVal & (NestedCCMaskVal - 1));
 
-  // Outer select_cc is inverted with respect to nested select_cc.
+  // Outer and nested TrueVal and FalseVal are swapped between the two.
   if (Invert)
     OuterCCMaskVal ^= SystemZ::CCMASK_ANY;
 
-  // Intersection of masks.
-  if (CCMask != SystemZ::CCMASK_CMP_EQ && !IsNestedCMP_EQ)
-    OuterCCMaskVal &= NestedCCMaskVal;
-  else // Union the masks.
+  // Compute the effective CC mask for the select.
+  // Generalizing into two categories:
+  // 1. If the CCMask of either the outer or the nested select_ccmask is
+  //    SystemZ::CCMASK_CMP_EQ, any combination computes to the union of masks.
+  // 2.a. Both are SystemZ::CCMASK_CMP_NE: !(!a | !b) => (a & b).
+  // 2.b. Combinations of CCMask SystemZ::CCMASK_CMP_LT or
+  //      SystemZ::CCMASK_CMP_GT with !IsNestedCMP_EQ intersect the masks.
+  if (CCMask == SystemZ::CCMASK_CMP_EQ || IsNestedCMP_EQ)
     OuterCCMaskVal |= NestedCCMaskVal;
+  else
+    OuterCCMaskVal &= NestedCCMaskVal;
 
+  // Get operands from nested select_ccmask.
   SDValue Op0 = NestedCCOp->getOperand(0);
   SDValue Op1 = NestedCCOp->getOperand(1);
 
+  // Return combined select_ccmask.
   return DAG.getNode(
       SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0), Op0, Op1,
       DAG.getTargetConstant(OuterCCValidVal, SDLoc(N), MVT::i32),
@@ -9112,20 +9097,25 @@ SDValue SystemZTargetLowering::combineTM(SDNode *N,
   auto *TMOp0Node = N->getOperand(0).getNode();
   auto *TMOp1Const = dyn_cast<ConstantSDNode>(N->getOperand(1));
   auto *TMOp2Const = dyn_cast<ConstantSDNode>(N->getOperand(2));
+  // Third operand of TM is false.
   if (!TMOp0Node || !TMOp1Const || !TMOp2Const ||
-      // Third operand of TM is false.
       TMOp2Const->getZExtValue() != 0)
     return SDValue();
+
   auto TMOp1ConstVal = TMOp1Const->getZExtValue();
   // Optimize (TM (IPM)).
   if (TMOp0Node->getOpcode() == SystemZISD::IPM) {
     int CCMask, CCValid;
+
+    // Compute the effective CC mask for select.
     if (TMOp1ConstVal == (1 << SystemZ::IPM_CC))
       CCMask = SystemZ::CCMASK_CMP_GE;
     else if (TMOp1ConstVal == (1 << (SystemZ::IPM_CC + 1)))
       CCMask = SystemZ::CCMASK_CMP_LE;
     else
       return SDValue();
+
+    // Set CCReg.
     SDValue CCReg = TMOp0Node->getOperand(0);
     CCValid = SystemZ::CCMASK_ANY;
     // Return combined node.
@@ -9138,33 +9128,38 @@ SDValue SystemZTargetLowering::combineTM(SDNode *N,
   // Optimize (TM (XOR (Op0 Op1))).
   if (TMOp0Node->getOpcode() == ISD::XOR) {
     auto *XorOp0 = TMOp0Node->getOperand(0).getNode();
-    // Op0. (SELECT_CCMASK (ICMP (SRL (IPM)))).
-    // Op1. (SRL (IPM (CC))).
+    // Op0: (SELECT_CCMASK (ICMP (SRL (IPM)))).
+    // Op1: (SRL (IPM (CC))).
     if (XorOp0 && XorOp0->getOpcode() == SystemZISD::SELECT_CCMASK) {
       auto *XorOp0CCValid = dyn_cast<ConstantSDNode>(XorOp0->getOperand(2));
       auto *XorOp0CCMask = dyn_cast<ConstantSDNode>(XorOp0->getOperand(3));
       if (!XorOp0CCValid || !XorOp0CCMask)
         return SDValue();
+
       SDValue XorOp0CCReg = XorOp0->getOperand(4);
       int XorOp0CCMaskVal = XorOp0CCMask->getZExtValue();
       int XorOp0CCValidVal = XorOp0CCValid->getZExtValue();
       int CCMask = SystemZ::CCMASK_CMP_EQ, CCValid, TMCCMask;
       SDValue CCReg = TMOp0Node->getOperand(1);
-      // (SELECT_CCMASK (ICMP (SRL (IPM)))).
+
+      // Combine (SELECT_CCMASK (ICMP (SRL (IPM)))) and get CC.
       if (!combineCCMask(XorOp0CCReg, XorOp0CCValidVal, XorOp0CCMaskVal) ||
           // (SRL (IPM (CC))).
           !combineCCMask(CCReg, CCValid, CCMask))
         return SDValue();
-      auto *N0 = XorOp0CCReg.getNode(), *N1 = CCReg.getNode();
+
       // Op0 and Op1 should point to the same CC.
+      auto *N0 = XorOp0CCReg.getNode(), *N1 = CCReg.getNode();
       if (!N0 || !N1 || N0 != N1)
         return SDValue();
+      // Compute the effective CC mask for select.
       if (TMOp1ConstVal == 1)
         TMCCMask = SystemZ::CCMASK_CMP_GE;
       else
         return SDValue();
-      // CCMask ^ XorOp0CCMaskVal = TMCCMask..
+      // CCMask ^ XorOp0CCMaskVal = TMCCMask
       CCMask = XorOp0CCMaskVal ^ TMCCMask;
+
       // Return combined node with combined CCMask.
       return DAG.getNode(
           SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
@@ -9179,10 +9174,12 @@ SDValue SystemZTargetLowering::combineTM(SDNode *N,
 SDValue SystemZTargetLowering::combineAND(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
+
   auto *AndOp0 = N->getOperand(0).getNode();
   auto *AndOp1 = N->getOperand(1).getNode();
   if (!AndOp0 || !AndOp1)
     return SDValue();
+
   // Both Operands of ISD::AND are SystemZISD::SELECT_CCMASK.
   // And CCMask of both operands and they should point to the
   // same CC and update CCReg. Return combined SDNode.
@@ -9194,48 +9191,65 @@ SDValue SystemZTargetLowering::combineAND(SDNode *N,
     auto *Op1CCMask = dyn_cast<ConstantSDNode>(AndOp1->getOperand(3));
     if (!Op0CCValid || !Op1CCValid || !Op0CCMask || !Op1CCMask)
       return SDValue();
+
     int Op0CCValidVal = Op0CCValid->getZExtValue();
     int Op1CCValidVal = Op1CCValid->getZExtValue();
     if (Op0CCValidVal != Op1CCValidVal)
       return SDValue();
+
+    // AndOp0 and AndOp1 should refer to same CC.
     SDValue Op0CCReg = AndOp0->getOperand(4), Op1CCReg = AndOp1->getOperand(4);
     auto *CCNode0 = Op0CCReg.getNode(), *CCNode1 = Op1CCReg.getNode();
-    // AndOp0 and AndOp1 should refer to same CC.
     if (!CCNode0 || !CCNode1 || CCNode0 != CCNode1)
       return SDValue();
-    // Verifying True/False of AndOp0 and AndOp1 matching or inverted.
-    bool Invert = false;
+
+    // Optimizing only the case where Op0TrueVal and Op1TrueVal are equal
+    // and at the same time Op0FalseVal and Op1FalseVal are also equal.
     auto *Op0TrueVal = dyn_cast<ConstantSDNode>(AndOp0->getOperand(0));
     auto *Op0FalseVal = dyn_cast<ConstantSDNode>(AndOp0->getOperand(1));
     auto *Op1TrueVal = dyn_cast<ConstantSDNode>(AndOp1->getOperand(0));
     auto *Op1FalseVal = dyn_cast<ConstantSDNode>(AndOp1->getOperand(1));
     if (!Op0TrueVal || !Op0FalseVal || !Op1TrueVal || !Op1FalseVal)
       return SDValue();
-    if (Op0TrueVal->getZExtValue() == Op1FalseVal->getZExtValue() ||
-        Op1TrueVal->getZExtValue() == Op0FalseVal->getZExtValue())
-      Invert = !Invert;
-    else if (Op0TrueVal->getZExtValue() != Op1TrueVal->getZExtValue() ||
-             Op0FalseVal->getZExtValue() != Op1FalseVal->getZExtValue())
+    if (Op0TrueVal->getZExtValue() != Op1TrueVal->getZExtValue() ||
+        Op0FalseVal->getZExtValue() != Op1FalseVal->getZExtValue())
       return SDValue();
+
+    // Compute the effective CC mask for select.
     int Op0CCMaskVal = Op0CCMask->getZExtValue();
     int Op1CCMaskVal = Op1CCMask->getZExtValue();
     int CCMask = Op0CCMaskVal & Op1CCMaskVal;
-    if (Invert)
-      CCMask ^= SystemZ::CCMASK_ANY;
+
+    // Check And's user's user: if it is select_ccmask or br_ccmask, put it
+    // back on the worklist so the algorithm can start processing from the outer
+    // select_ccmask/br_ccmask and combine with (icmp (select_ccmask)).
+    for (SDUse &IcmpUse : N->uses()) {
+      auto *Icmp = IcmpUse.getUser();
+      if (Icmp && Icmp->getOpcode() == SystemZISD::ICMP) {
+        for (SDUse &SelectBrUse : Icmp->uses()) {
+          auto *SelectBr = SelectBrUse.getUser();
+          if (SelectBr && (SelectBr->getOpcode() == SystemZISD::SELECT_CCMASK ||
+                           SelectBr->getOpcode() == SystemZISD::BR_CCMASK))
+            DCI.AddToWorklist(SelectBr);
+        }
+      }
+    }
     return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
                        AndOp0->getOperand(0), AndOp0->getOperand(1),
                        DAG.getTargetConstant(Op0CCValidVal, SDLoc(N), MVT::i32),
                        DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32),
                        Op0CCReg);
   } else if (AndOp0->getOpcode() == SystemZISD::SELECT_CCMASK) {
-    // AndOp1 is (SRL (IPM (CC))) pattern. Second operand is CC.
+    // AndOp1: (SRL (IPM (CC))).
+    // AndOp2: CC.
     SDValue CCReg = N->getOperand(1);
     int CCMask = SystemZ::CCMASK_CMP_EQ, CCValid;
     if (!combineCCMask(CCReg, CCValid, CCMask))
       return SDValue();
+
+    // AndOp0 and AndOp1 should refer to same CC.
     SDValue Op0CCReg = AndOp0->getOperand(4);
     auto *CCNode0 = Op0CCReg.getNode(), *CCNode1 = CCReg.getNode();
-    // AndOp0 and AndOp1 should refer to same CC.
     if (!CCNode0 || !CCNode1 || CCNode0 != CCNode1)
       return SDValue();
     auto *Op0CCValid = dyn_cast<ConstantSDNode>(AndOp0->getOperand(2));
@@ -9243,11 +9257,28 @@ SDValue SystemZTargetLowering::combineAND(SDNode *N,
     int Op0CCValidVal = Op0CCValid->getZExtValue();
     if (!Op0CCMask || !Op0CCValid)
       return SDValue();
-    int Op0CCMaskVal = Op0CCMask->getZExtValue();
+
+    // Compute the effective CC mask for select.
     // Op0CCMaskVal & CCMask = 0. Invert Op0CCMaskVal.
+    int Op0CCMaskVal = Op0CCMask->getZExtValue();
     int CCMaskVal = Op0CCMaskVal ^ 0xf;
     assert(CCMaskVal < 4 && "CC out of range");
     CCMask = 1 << (3 - CCMaskVal);
+
+    // Check And's user's user: if it is select_ccmask or br_ccmask, put it
+    // back on the worklist so the algorithm can start processing from the outer
+    // select_ccmask/br_ccmask and combine with (icmp (select_ccmask)).
+    for (SDUse &IcmpUse : N->uses()) {
+      auto *Icmp = IcmpUse.getUser();
+      if (Icmp && Icmp->getOpcode() == SystemZISD::ICMP) {
+        for (SDUse &SelectBrUse : Icmp->uses()) {
+          auto *SelectBr = SelectBrUse.getUser();
+          if (SelectBr && (SelectBr->getOpcode() == SystemZISD::SELECT_CCMASK ||
+                           SelectBr->getOpcode() == SystemZISD::BR_CCMASK))
+            DCI.AddToWorklist(SelectBr);
+        }
+      }
+    }
     return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
                        AndOp0->getOperand(0), AndOp0->getOperand(1),
                        DAG.getTargetConstant(Op0CCValidVal, SDLoc(N), MVT::i32),
@@ -9260,10 +9291,12 @@ SDValue SystemZTargetLowering::combineAND(SDNode *N,
 SDValue SystemZTargetLowering::combineOR(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
+
   auto *OrOp0 = N->getOperand(0).getNode();
   auto *OrOp1 = N->getOperand(1).getNode();
   if (!OrOp0 || !OrOp1)
     return SDValue();
+
   // Both Operands of ISD::OR are SystemZISD::SELECT_CCMASK.
   // And CCMask of both operands and they should point to the
   // same CC and update CCReg. Return combined SDNode.
@@ -9279,33 +9312,46 @@ SDValue SystemZTargetLowering::combineOR(SDNode *N,
     int Op1CCValidVal = Op1CCValid->getZExtValue();
     if (Op0CCValidVal != Op1CCValidVal)
       return SDValue();
+
+    // OrOp0 and OrOp1 should point to the same CC.
     SDValue Op0CCReg = OrOp0->getOperand(4), Op1CCReg = OrOp1->getOperand(4);
     auto *CCNode0 = Op0CCReg.getNode(), *CCNode1 = Op1CCReg.getNode();
-    // OrOp0 and OrOp0 should point to same CC.
     if (!CCNode0 || !CCNode1 || CCNode0 != CCNode1)
       return SDValue();
-    // Checking True/False of OrOp0 and OrOp1 matching or inverted.
-    bool Invert = false;
+
+    // Optimizing only the case where Op0TrueVal and Op1TrueVal are equal
+    // and at the same time Op0FalseVal and Op1FalseVal are also equal.
     auto *Op0TrueVal = dyn_cast<ConstantSDNode>(OrOp0->getOperand(0));
     auto *Op0FalseVal = dyn_cast<ConstantSDNode>(OrOp0->getOperand(1));
     auto *Op1TrueVal = dyn_cast<ConstantSDNode>(OrOp1->getOperand(0));
     auto *Op1FalseVal = dyn_cast<ConstantSDNode>(OrOp1->getOperand(1));
     if (!Op0TrueVal || !Op0FalseVal || !Op1TrueVal || !Op1FalseVal)
       return SDValue();
-    if (Op0TrueVal->getZExtValue() == Op1FalseVal->getZExtValue() ||
-        Op1TrueVal->getZExtValue() == Op0FalseVal->getZExtValue())
-      Invert = !Invert;
-    else if (Op0TrueVal->getZExtValue() != Op1TrueVal->getZExtValue() ||
-             Op0FalseVal->getZExtValue() != Op1FalseVal->getZExtValue())
+    if (Op0TrueVal->getZExtValue() != Op1TrueVal->getZExtValue() ||
+        Op0FalseVal->getZExtValue() != Op1FalseVal->getZExtValue())
       return SDValue();
+
+    // Compute the effective CC mask for select.
     int Op0CCMaskVal = Op0CCMask->getZExtValue();
     int Op1CCMaskVal = Op1CCMask->getZExtValue();
-    // Oring Masks.
     int CCMask = Op0CCMaskVal | Op1CCMaskVal;
-    if (Invert)
-      CCMask ^= SystemZ::CCMASK_ANY;
+
+    // Check Or's user's user: if it is select_ccmask or br_ccmask, put it back
+    // on the worklist so the algorithm can start processing from the outer
+    // select_ccmask/br_ccmask and combine with (icmp (select_ccmask)).
+    for (SDUse &IcmpUse : N->uses()) {
+      auto *Icmp = IcmpUse.getUser();
+      if (Icmp && Icmp->getOpcode() == SystemZISD::ICMP) {
+        for (SDUse &SelectBrUse : Icmp->uses()) {
+          auto *SelectBr = SelectBrUse.getUser();
+          if (SelectBr && (SelectBr->getOpcode() == SystemZISD::SELECT_CCMASK ||
+                           SelectBr->getOpcode() == SystemZISD::BR_CCMASK))
+            DCI.AddToWorklist(SelectBr);
+        }
+      }
+    }
     return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
-                       OrOp0->getOperand(0), OrOp1->getOperand(1),
+                       OrOp0->getOperand(0), OrOp0->getOperand(1),
                        DAG.getTargetConstant(Op0CCValidVal, SDLoc(N), MVT::i32),
                        DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32),
                        Op0CCReg);
@@ -9313,82 +9359,15 @@ SDValue SystemZTargetLowering::combineOR(SDNode *N,
   return SDValue();
 }
 
-SDValue SystemZTargetLowering::combineICMP(SDNode *N,
-                                           DAGCombinerInfo &DCI) const {
-  SelectionDAG &DAG = DCI.DAG;
-  auto *LHS = N->getOperand(0).getNode();
-  if (!LHS)
-    return SDValue();
-  auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
-  // Handle the case where RHS is const.
-  // Remove redundant ICMP left after operand 0 pattern has been combined by
-  // by combineAND, combineXOR, combineOR into select_cc on the assumption
-  // CmpVal is zero.
-  // Why is redundant ICMP created?
-  // This redundant 'icmp' is created as we are combining '(icmp (and...))',
-  // 'icmp (or...))', or 'icmp (xor...))' in combineAND, combineOR, or
-  // combineXOR at 'and', 'or', 'xor' level and select_ccmask is returned.
-  // e.g t10 is returned by combineOR/XOR/AND.
-  // t10: (select_ccmask VAL1 VAL2 CCMASK_ANY CCOp)
-  // (icmp t10 0)
-  // Following code return t10, entire select_ccmask.
-  // This dangling icmp can be avoided if we combine entire sequence at 'icmp'
-  // level. But downside of this approach is most of this code will be pulled
-  // into combineICMP and might leave some of the added pattern to
-  // PerformDAGCombine useless.
-  if (RHS && LHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
-    auto *CCValid = dyn_cast<ConstantSDNode>(LHS->getOperand(2));
-    int CmpVal = RHS->getZExtValue();
-    // Check CmpVal is zero.
-    if (!CCValid || CmpVal != 0)
-      return SDValue();
-    return N->getOperand(0);
-  }
-  if (RHS)
-    return SDValue();
-  // Handle the case where RHS is also expression.
-  // Optimize (ICMP (SELECT_CCMASK AND (SRL (IPM (CC))))).
-  // ICMP: Op0 - SELECT_CCMASK and Op1 - (AND (SRL (IPM (CC)))).
-  if (LHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
-    auto *SelectCCValid = dyn_cast<ConstantSDNode>(LHS->getOperand(2));
-    auto *SelectCCMask = dyn_cast<ConstantSDNode>(LHS->getOperand(3));
-    if (!SelectCCValid || !SelectCCMask)
-      return SDValue();
-    auto *RHS = N->getOperand(1).getNode();
-    if (RHS && RHS->getOpcode() == ISD::AND) {
-      SDValue AndCCReg = RHS->getOperand(0);
-      auto *AndConst = dyn_cast<ConstantSDNode>(RHS->getOperand(1));
-      if (!AndConst || AndConst->getZExtValue() != 1)
-        return SDValue();
-      int AndCCMask = SystemZ::CCMASK_CMP_EQ, AndCCValid;
-      // (AND (SRL (IPM (CC)))). Anding bit 28.
-      if (!combineCCMask(AndCCReg, AndCCValid, AndCCMask))
-        return SDValue();
-      // Anding bit 28 false => CC == 0 || CC == 2.
-      AndCCMask = SystemZ::CCMASK_CMP_GE;
-      auto *N1 = AndCCReg.getNode(), *N2 = LHS->getOperand(4).getNode();
-      if (!N1 || !N2 || N1 != N2)
-        return SDValue();
-      // CCMask for comparing AndCCMask and SelectCCMasK.
-      int CCMask = AndCCMask ^ SelectCCMask->getZExtValue();
-      AndCCValid = SystemZ::CCMASK_ANY;
-      return DAG.getNode(
-          SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
-          LHS->getOperand(0), LHS->getOperand(1),
-          DAG.getTargetConstant(AndCCValid, SDLoc(N), MVT::i32),
-          DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32), AndCCReg);
-    }
-  }
-  return SDValue();
-}
-
 SDValue SystemZTargetLowering::combineXOR(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
+
   auto *XorOp0 = N->getOperand(0).getNode();
   auto *XorOp1 = N->getOperand(1).getNode();
   if (!XorOp0 || !XorOp1)
     return SDValue();
+
   // Both Operands of ISD::XOR are SystemZISD::SELECT_CCMASK.
   // And CCMask of both operands and they should point to the
   // same CC and update CCReg. Return combined SDNode.
@@ -9400,40 +9379,53 @@ SDValue SystemZTargetLowering::combineXOR(SDNode *N,
     auto *Op1CCMask = dyn_cast<ConstantSDNode>(XorOp1->getOperand(3));
     if (!Op0CCValid || !Op1CCValid || !Op0CCMask || !Op1CCMask)
       return SDValue();
+
     int Op0CCValidVal = Op0CCValid->getZExtValue();
     int Op1CCValidVal = Op1CCValid->getZExtValue();
     if (Op0CCValidVal != Op1CCValidVal)
       return SDValue();
+
+    // XorOp0 and XorOp1 should point to the same CC.
     SDValue Op0CCReg = XorOp0->getOperand(4), Op1CCReg = XorOp1->getOperand(4);
     auto *CCNode0 = Op0CCReg.getNode(), *CCNode1 = Op1CCReg.getNode();
-    // Same CC node.
     if (!CCNode0 || !CCNode1 || CCNode0 != CCNode1)
       return SDValue();
-    // XorOp0 and XorOp0 should point to same CC.
     if (!CCNode0 || !CCNode1 || CCNode0 != CCNode1)
       return SDValue();
-    // Checking True/False of XorOp0 and XorOp1 matching or inverted.
-    bool Invert = false;
+
+    // Optimizing only the case where Op0TrueVal and Op1TrueVal are equal
+    // and at the same time Op0FalseVal and Op1FalseVal are also equal.
     auto *Op0TrueVal = dyn_cast<ConstantSDNode>(XorOp0->getOperand(0));
     auto *Op0FalseVal = dyn_cast<ConstantSDNode>(XorOp0->getOperand(1));
     auto *Op1TrueVal = dyn_cast<ConstantSDNode>(XorOp1->getOperand(0));
     auto *Op1FalseVal = dyn_cast<ConstantSDNode>(XorOp1->getOperand(1));
     if (!Op0TrueVal || !Op0FalseVal || !Op1TrueVal || !Op1FalseVal)
       return SDValue();
-    if (Op0TrueVal->getZExtValue() == Op1FalseVal->getZExtValue() ||
-        Op1TrueVal->getZExtValue() == Op0FalseVal->getZExtValue())
-      Invert = !Invert;
-    else if (Op0TrueVal->getZExtValue() != Op1TrueVal->getZExtValue() ||
-             Op0FalseVal->getZExtValue() != Op1FalseVal->getZExtValue())
+    if (Op0TrueVal->getZExtValue() != Op1TrueVal->getZExtValue() ||
+        Op0FalseVal->getZExtValue() != Op1FalseVal->getZExtValue())
       return SDValue();
+
+    // Compute the effective CC mask for select.
     int Op0CCMaskVal = Op0CCMask->getZExtValue();
     int Op1CCMaskVal = Op1CCMask->getZExtValue();
-    // Xor the masks.
     int CCMask = Op0CCMaskVal ^ Op1CCMaskVal;
-    if (Invert)
-      CCMask ^= SystemZ::CCMASK_ANY;
+
+    // Check Xor's user's user: if it is select_ccmask or br_ccmask, put it back
+    // on the worklist so the algorithm can start processing from the outer
+    // select_ccmask/br_ccmask and combine with (icmp (select_ccmask)).
+    for (SDUse &IcmpUse : N->uses()) {
+      auto *Icmp = IcmpUse.getUser();
+      if (Icmp && Icmp->getOpcode() == SystemZISD::ICMP) {
+        for (SDUse &SelectBrUse : Icmp->uses()) {
+          auto *SelectBr = SelectBrUse.getUser();
+          if (SelectBr && (SelectBr->getOpcode() == SystemZISD::SELECT_CCMASK ||
+                           SelectBr->getOpcode() == SystemZISD::BR_CCMASK))
+            DCI.AddToWorklist(SelectBr);
+        }
+      }
+    }
     return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
-                       XorOp0->getOperand(0), XorOp1->getOperand(1),
+                       XorOp0->getOperand(0), XorOp0->getOperand(1),
                        DAG.getTargetConstant(Op0CCValidVal, SDLoc(N), MVT::i32),
                        DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32),
                        Op0CCReg);
@@ -9468,8 +9460,9 @@ SDValue SystemZTargetLowering::combineBR_CCMASK(SDNode *N,
 SDValue SystemZTargetLowering::combineSELECT_CCMASK(
     SDNode *N, DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
-  // Handle the case where select_cc_outer has TrueVal or FalseVal has
-  // altredy combined select_cc_nested.
+
+  // Handle the case where the select_ccmask's TrueVal or FalseVal points to a
+  // nested select_ccmask.
   SDValue Res = combineSELECT_CC_CCIPMMask(N, DCI);
   if (Res != SDValue())
     return Res;
@@ -9483,6 +9476,7 @@ SDValue SystemZTargetLowering::combineSELECT_CCMASK(
   int CCValidVal = CCValid->getZExtValue();
   int CCMaskVal = CCMask->getZExtValue();
   SDValue CCReg = N->getOperand(4);
+
   if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
     return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
                        N->getOperand(0), N->getOperand(1),
@@ -9816,8 +9810,6 @@ SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::INTRINSIC_VOID:     return combineINTRINSIC(N, DCI);
   case ISD::AND:
     return combineAND(N, DCI);
-  case SystemZISD::ICMP:
-    return combineICMP(N, DCI);
   case ISD::OR:
     return combineOR(N, DCI);
   case SystemZISD::TM:
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index 424dd530a0b67..987dbe1dd1d34 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -800,7 +800,6 @@ class SystemZTargetLowering : public TargetLowering {
   SDValue combineIntDIVREM(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineINTRINSIC(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineAND(SDNode *N, DAGCombinerInfo &DCI) const;
-  SDValue combineICMP(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineOR(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineTM(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineXOR(SDNode *N, DAGCombinerInfo &DCI) const;
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed.ll
index 46e162f697a73..a4fd5f922e657 100644
--- a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed.ll
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed.ll
@@ -166,9 +166,14 @@ define signext range(i32 0, 43) i32 @foo_023_XOR_OR(i32 noundef signext %x) {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    lghi %r2, 42
-; CHECK-NEXT:    bnlr %r14
-; CHECK-NEXT:  .LBB6_1: # %entry
-; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    lghi %r0, 42
+; CHECK-NEXT:    jhe .LBB6_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    lghi %r0, 0
+; CHECK-NEXT:  .LBB6_2: # %entry
+; CHECK-NEXT:    bor %r14
+; CHECK-NEXT:  .LBB6_3: # %entry
+; CHECK-NEXT:    lgr %r2, %r0
 ; CHECK-NEXT:    br %r14
 entry:
   %0 = tail call { i32, i32 } asm sideeffect "ahi $0,42\0A", "=d,={@cc},0"(i32 %x) #2
@@ -847,7 +852,12 @@ define i64 @bar_023_OR_XOR_c() {
 ; CHECK-NEXT:    alsi 0(%r1), -1
 ; CHECK-EMPTY:
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    jgnl dummy at PLT
+; CHECK-NEXT:    ipm %r0
+; CHECK-NEXT:    risbg %r1, %r0, 63, 191, 36
+; CHECK-NEXT:    afi %r0, -805306368
+; CHECK-NEXT:    srl %r0, 31
+; CHECK-NEXT:    cr %r0, %r1
+; CHECK-NEXT:    jglh dummy at PLT
 ; CHECK-NEXT:  .LBB33_1: # %if.end
 ; CHECK-NEXT:    br %r14
 entry:
diff --git a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_eq_noteq.ll b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_eq_noteq.ll
index 6444cd91f76d4..64b2e381b73b8 100644
--- a/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_eq_noteq.ll
+++ b/llvm/test/CodeGen/SystemZ/flag_output_operand_ccmixed_eq_noteq.ll
@@ -5078,7 +5078,12 @@ define i64 @bar4a_023_OR_XOR_c() {
 ; CHECK-NEXT:    alsi 0(%r1), -1
 ; CHECK-EMPTY:
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    jgl dummy at PLT
+; CHECK-NEXT:    ipm %r0
+; CHECK-NEXT:    risbg %r1, %r0, 63, 191, 36
+; CHECK-NEXT:    afi %r0, 1342177280
+; CHECK-NEXT:    srl %r0, 31
+; CHECK-NEXT:    cr %r0, %r1
+; CHECK-NEXT:    jglh dummy at PLT
 ; CHECK-NEXT:  .LBB209_1: # %if.end
 ; CHECK-NEXT:    br %r14
 entry:

>From 44f2b5d33b7b09218f06aabb512cc6122cda09d2 Mon Sep 17 00:00:00 2001
From: anoopkg6 <anoopkg6 at github.com>
Date: Wed, 2 Jul 2025 04:01:11 +0200
Subject: [PATCH 12/12] Incorporated code review feedback. 1. Remove combineTM
 and move its code into combineCCMask, combining select_ccmask or br_ccmask
 with (TM (select_ccmask cc-value)), where cc-value is (SRL (IPM (CC))).
 2. Implement xor of (select_ccmask .. CC) with the cc-value for cases where
 only one bit is set or clear in the CCMask. 3. Constrain the combineOR,
 combineXOR and combineAND optimizations to specific combinations of TrueVal
 vs. FalseVal between the select_ccmask nodes.

---
 .../Target/SystemZ/SystemZISelLowering.cpp    | 259 ++++++++++--------
 llvm/lib/Target/SystemZ/SystemZISelLowering.h |   1 -
 2 files changed, 148 insertions(+), 112 deletions(-)
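
To make the new (TM (select_ccmask cc-value)) handling concrete, here is a
hypothetical C fragment (same assumptions as above about the "=@cc" spelling
and the 0..3 cc value; it mirrors the "and i32 %asmresult1, 1" pattern seen in
the IR tests rather than copying any of them). A test of a single bit of the
cc value ends up as a TM over the (SRL (IPM (CC))) cc-value (or over a
select_ccmask of it), which the reworked combineCCMask now folds back into a
CC-mask test on the original CC.

    int odd_cc(int x) {
      int cc;
      asm("ahi %0,42" : "+d"(x), "=@cc"(cc));
      /* Bit 0 of the cc value is set exactly for CC 1 and CC 3. */
      return (cc & 1) ? 1 : 0;
    }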

diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 5728ee45b3f29..fd26e1cfffa70 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -8774,36 +8774,59 @@ static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
   if (!CCNode)
     return false;
 
-  // Check (SRL (IPM)) pattern and update CCReg if true.
-  if (isSRL_IPM_CCSequence(CCNode))
-    return true;
-
-  // Combine CCMASK_TM with select_ccmask.
-  if (CCNode->getOpcode() == SystemZISD::SELECT_CCMASK) {
+  // Optimize (TM (IPM (CC)))
+  if (CCNode->getOpcode() == SystemZISD::TM) {
     bool Invert = false;
-
-    // Outer select_ccmask is TM.
     if (CCValid != SystemZ::CCMASK_TM)
       return false;
     if (CCMask == SystemZ::CCMASK_TM_SOME_1)
       Invert = !Invert;
     else if (CCMask != SystemZ::CCMASK_TM_ALL_0)
       return false;
-
-    auto *CCNValid = dyn_cast<ConstantSDNode>(CCNode->getOperand(2));
-    auto *CCNMask = dyn_cast<ConstantSDNode>(CCNode->getOperand(3));
-    if (!CCNValid || !CCNMask)
+    auto *N = CCNode->getOperand(0).getNode();
+    auto *TMOp1Const = dyn_cast<ConstantSDNode>(CCNode->getOperand(1));
+    auto *TMOp2Const = dyn_cast<ConstantSDNode>(CCNode->getOperand(2));
+    if (!N || !TMOp1Const || !TMOp2Const || TMOp2Const->getZExtValue() != 0)
       return false;
-
-    // Update CCMask and CCValid.
-    CCValid = SystemZ::CCMASK_ANY;
-    CCMask = CCNMask->getZExtValue();
-    if (Invert)
-      CCMask ^= SystemZ::CCMASK_ANY;
-
-    // Update CCReg link.
-    CCReg = CCNode->getOperand(4);
-    return true;
+    auto TMConstVal = TMOp1Const->getZExtValue();
+    if (N->getOpcode() == SystemZISD::IPM) {
+      if (TMConstVal == (1 << SystemZ::IPM_CC))
+        CCMask = SystemZ::CCMASK_CMP_GE;
+      else if (TMConstVal == (1 << (SystemZ::IPM_CC + 1)))
+        CCMask = SystemZ::CCMASK_CMP_LE;
+      else
+        return false;
+      if (Invert)
+        CCMask ^= CCValid;
+      // Return the updated CCReg link.
+      CCReg = N->getOperand(0);
+      return true;
+    } else if (N->getOpcode() == SystemZISD::SELECT_CCMASK) {
+      if (TMConstVal != 1)
+        return false;
+      auto *TrueVal = dyn_cast<ConstantSDNode>(N->getOperand(0));
+      auto *FalseVal = dyn_cast<ConstantSDNode>(N->getOperand(1));
+      if (TrueVal && isSRL_IPM_CCSequence(N->getOperand(1).getNode())) {
+        if ((TrueVal->getZExtValue() & TMConstVal) == 0)
+          CCMask = SystemZ::CCMASK_ANY ^ SystemZ::CCMASK_CMP_LT;
+        else
+          return false;
+      } else if (FalseVal && isSRL_IPM_CCSequence(N->getOperand(0).getNode())) {
+        if ((FalseVal->getZExtValue() & TMConstVal) == 0) {
+          Invert = !Invert;
+          CCMask = SystemZ::CCMASK_ANY ^ SystemZ::CCMASK_CMP_LT;
+        } else if ((FalseVal->getZExtValue() & TMConstVal) == 1)
+          CCMask = SystemZ::CCMASK_ANY ^ SystemZ::CCMASK_CMP_GT;
+        else
+          return false;
+      } else
+        return false;
+      // CCReg is updated by isSRL_IPM_CCSequence.
+      if (Invert)
+        CCMask ^= CCValid;
+      return true;
+    }
+    return false;
   }
 
   // Rest of the code has sequence starting with opcode SystemZISD::ICMP.
@@ -9091,86 +9114,6 @@ SystemZTargetLowering::getJumpConditionMergingParams(Instruction::BinaryOps Opc,
   return {-1, -1, -1};
 }
 
-SDValue SystemZTargetLowering::combineTM(SDNode *N,
-                                         DAGCombinerInfo &DCI) const {
-  SelectionDAG &DAG = DCI.DAG;
-  auto *TMOp0Node = N->getOperand(0).getNode();
-  auto *TMOp1Const = dyn_cast<ConstantSDNode>(N->getOperand(1));
-  auto *TMOp2Const = dyn_cast<ConstantSDNode>(N->getOperand(2));
-  // Third operand of TM is false.
-  if (!TMOp0Node || !TMOp1Const || !TMOp2Const ||
-      TMOp2Const->getZExtValue() != 0)
-    return SDValue();
-
-  auto TMOp1ConstVal = TMOp1Const->getZExtValue();
-  // Optimize (TM (IPM)).
-  if (TMOp0Node->getOpcode() == SystemZISD::IPM) {
-    int CCMask, CCValid;
-
-    // Compute the effective CC mask for select.
-    if (TMOp1ConstVal == (1 << SystemZ::IPM_CC))
-      CCMask = SystemZ::CCMASK_CMP_GE;
-    else if (TMOp1ConstVal == (1 << (SystemZ::IPM_CC + 1)))
-      CCMask = SystemZ::CCMASK_CMP_LE;
-    else
-      return SDValue();
-
-    // Set CCReg.
-    SDValue CCReg = TMOp0Node->getOperand(0);
-    CCValid = SystemZ::CCMASK_ANY;
-    // Return combined node.
-    return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
-                       N->getOperand(0), N->getOperand(1),
-                       DAG.getTargetConstant(CCValid, SDLoc(N), MVT::i32),
-                       DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32),
-                       CCReg);
-  }
-  // Optimize (TM (XOR (Op0 Op1))).
-  if (TMOp0Node->getOpcode() == ISD::XOR) {
-    auto *XorOp0 = TMOp0Node->getOperand(0).getNode();
-    // Op0: (SELECT_CCMASK (ICMP (SRL (IPM)))).
-    // Op1: (SRL (IPM (CC))).
-    if (XorOp0 && XorOp0->getOpcode() == SystemZISD::SELECT_CCMASK) {
-      auto *XorOp0CCValid = dyn_cast<ConstantSDNode>(XorOp0->getOperand(2));
-      auto *XorOp0CCMask = dyn_cast<ConstantSDNode>(XorOp0->getOperand(3));
-      if (!XorOp0CCValid || !XorOp0CCMask)
-        return SDValue();
-
-      SDValue XorOp0CCReg = XorOp0->getOperand(4);
-      int XorOp0CCMaskVal = XorOp0CCMask->getZExtValue();
-      int XorOp0CCValidVal = XorOp0CCValid->getZExtValue();
-      int CCMask = SystemZ::CCMASK_CMP_EQ, CCValid, TMCCMask;
-      SDValue CCReg = TMOp0Node->getOperand(1);
-
-      // Combine (SELECT_CCMASK (ICMP (SRL (IPM)))) and get CC.
-      if (!combineCCMask(XorOp0CCReg, XorOp0CCValidVal, XorOp0CCMaskVal) ||
-          // (SRL (IPM (CC))).
-          !combineCCMask(CCReg, CCValid, CCMask))
-        return SDValue();
-
-      // Op0 and Op1 should point to the same CC.
-      auto *N0 = XorOp0CCReg.getNode(), *N1 = CCReg.getNode();
-      if (!N0 || !N1 || N0 != N1)
-        return SDValue();
-      // Compute the effective CC mask for select.
-      if (TMOp1ConstVal == 1)
-        TMCCMask = SystemZ::CCMASK_CMP_GE;
-      else
-        return SDValue();
-      // CCMask ^ XorOp0CCMaskVal = TMCCMask
-      CCMask = XorOp0CCMaskVal ^ TMCCMask;
-
-      // Return combined node with combined CCMask.
-      return DAG.getNode(
-          SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
-          XorOp0->getOperand(0), XorOp0->getOperand(1),
-          DAG.getTargetConstant(XorOp0CCValidVal, SDLoc(N), MVT::i32),
-          DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32), CCReg);
-    }
-  }
-  return SDValue();
-}
-
 SDValue SystemZTargetLowering::combineAND(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -9214,6 +9157,9 @@ SDValue SystemZTargetLowering::combineAND(SDNode *N,
     if (Op0TrueVal->getZExtValue() != Op1TrueVal->getZExtValue() ||
         Op0FalseVal->getZExtValue() != Op1FalseVal->getZExtValue())
       return SDValue();
+    if (Op0TrueVal->getZExtValue() == Op1FalseVal->getZExtValue() ||
+        Op1TrueVal->getZExtValue() == Op0FalseVal->getZExtValue())
+      return SDValue();
 
     // Compute the effective CC mask for select.
     int Op0CCMaskVal = Op0CCMask->getZExtValue();
@@ -9240,22 +9186,30 @@ SDValue SystemZTargetLowering::combineAND(SDNode *N,
                        DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32),
                        Op0CCReg);
   } else if (AndOp0->getOpcode() == SystemZISD::SELECT_CCMASK) {
-    // AndOp1: (SRL (IPM (CC))).
-    // AndOp2: CC.
-    SDValue CCReg = N->getOperand(1);
-    int CCMask = SystemZ::CCMASK_CMP_EQ, CCValid;
-    if (!combineCCMask(CCReg, CCValid, CCMask))
+    // AndOp1: (SRL (IPM)).
+    if (AndOp1->getOpcode() != ISD::SRL)
       return SDValue();
+    auto *SRLCount = dyn_cast<ConstantSDNode>(AndOp1->getOperand(1));
+    if (!SRLCount || SRLCount->getZExtValue() != SystemZ::IPM_CC)
+      return SDValue();
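+    // Shifting right by SystemZ::IPM_CC isolates the CC field set by IPM.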
+    auto *IPM = AndOp1->getOperand(0).getNode();
+    if (!IPM || IPM->getOpcode() != SystemZISD::IPM)
+      return SDValue();
+    SDValue CCReg = IPM->getOperand(0);
 
     // AndOp0 and AndOp1 should refer to the same CC.
     SDValue Op0CCReg = AndOp0->getOperand(4);
     auto *CCNode0 = Op0CCReg.getNode(), *CCNode1 = CCReg.getNode();
     if (!CCNode0 || !CCNode1 || CCNode0 != CCNode1)
       return SDValue();
+    auto *Op0TrueVal = dyn_cast<ConstantSDNode>(AndOp0->getOperand(0));
+    auto *Op0FalseVal = dyn_cast<ConstantSDNode>(AndOp0->getOperand(1));
     auto *Op0CCValid = dyn_cast<ConstantSDNode>(AndOp0->getOperand(2));
     auto *Op0CCMask = dyn_cast<ConstantSDNode>(AndOp0->getOperand(3));
     int Op0CCValidVal = Op0CCValid->getZExtValue();
-    if (!Op0CCMask || !Op0CCValid)
+    if (!Op0TrueVal || !Op0FalseVal || !Op0CCMask || !Op0CCValid)
+      return SDValue();
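+    // Only handle selects that materialize 1 for true and 0 for false.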
+    if (Op0TrueVal->getZExtValue() != 1 || Op0FalseVal->getZExtValue() != 0)
       return SDValue();
 
     // Compute the effective CC mask for select.
@@ -9263,7 +9217,7 @@ SDValue SystemZTargetLowering::combineAND(SDNode *N,
     int Op0CCMaskVal = Op0CCMask->getZExtValue();
     int CCMaskVal = Op0CCMaskVal ^ 0xf;
     assert(CCMaskVal < 4 && "CC out of range");
-    CCMask = 1 << (3 - CCMaskVal);
+    int CCMask = 1 << (3 - CCMaskVal);
 
     // Check And's user's user: if it is select_ccmask or br_ccmask, put it
     // back on the WorkList so the algorithm can restart from the outer
@@ -9330,6 +9284,9 @@ SDValue SystemZTargetLowering::combineOR(SDNode *N,
     if (Op0TrueVal->getZExtValue() != Op1TrueVal->getZExtValue() ||
         Op0FalseVal->getZExtValue() != Op1FalseVal->getZExtValue())
       return SDValue();
+    if (Op0TrueVal->getZExtValue() == Op1FalseVal->getZExtValue() ||
+        Op1TrueVal->getZExtValue() == Op0FalseVal->getZExtValue())
+      return SDValue();
 
     // Compute the effective CC mask for select.
     int Op0CCMaskVal = Op0CCMask->getZExtValue();
@@ -9404,6 +9361,9 @@ SDValue SystemZTargetLowering::combineXOR(SDNode *N,
     if (Op0TrueVal->getZExtValue() != Op1TrueVal->getZExtValue() ||
         Op0FalseVal->getZExtValue() != Op1FalseVal->getZExtValue())
       return SDValue();
+    if (Op0TrueVal->getZExtValue() == Op1FalseVal->getZExtValue() ||
+        Op1TrueVal->getZExtValue() == Op0FalseVal->getZExtValue())
+      return SDValue();
 
     // Compute the effective CC mask for select.
     int Op0CCMaskVal = Op0CCMask->getZExtValue();
@@ -9429,6 +9389,85 @@ SDValue SystemZTargetLowering::combineXOR(SDNode *N,
                        DAG.getTargetConstant(Op0CCValidVal, SDLoc(N), MVT::i32),
                        DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32),
                        Op0CCReg);
+  } else if (XorOp0->getOpcode() == SystemZISD::SELECT_CCMASK) {
+    // XorOp1: (SRL (IPM)).
+    if (XorOp1->getOpcode() != ISD::SRL)
+      return SDValue();
+    auto *SRLCount = dyn_cast<ConstantSDNode>(XorOp1->getOperand(1));
+    if (!SRLCount || SRLCount->getZExtValue() != SystemZ::IPM_CC)
+      return SDValue();
+    auto *IPM = XorOp1->getOperand(0).getNode();
+    if (!IPM || IPM->getOpcode() != SystemZISD::IPM)
+      return SDValue();
+    SDValue CCReg = IPM->getOperand(0);
+
+    // XorOp0 and XorOp1 should refer to the same CC.
+    SDValue Op0CCReg = XorOp0->getOperand(4);
+    auto *CCNode0 = Op0CCReg.getNode(), *CCNode1 = CCReg.getNode();
+    if (!CCNode0 || !CCNode1 || CCNode0 != CCNode1)
+      return SDValue();
+    auto *Op0TrueVal = dyn_cast<ConstantSDNode>(XorOp0->getOperand(0));
+    auto *Op0FalseVal = dyn_cast<ConstantSDNode>(XorOp0->getOperand(1));
+    auto *Op0CCValid = dyn_cast<ConstantSDNode>(XorOp0->getOperand(2));
+    auto *Op0CCMask = dyn_cast<ConstantSDNode>(XorOp0->getOperand(3));
+    if (!Op0TrueVal || !Op0FalseVal || !Op0CCMask || !Op0CCValid)
+      return SDValue();
+    int Op0CCValidVal = Op0CCValid->getZExtValue();
+
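+    // Mask & (Mask - 1) clears the lowest set bit, so the test below is true
+    // exactly when a single CC bit is set.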
+    const auto isOneBitSet = [](int Mask) {
+      return Mask && !(Mask & (Mask - 1));
+    };
+
+    // Map a single-bit CC mask to the CC value it selects (bit 3 is CC 0).
+    const auto log2CCMaskToCCVal = [](int Mask) {
+      size_t Pos = 0;
+      while (!(Mask & 1)) {
+        Mask >>= 1;
+        ++Pos;
+      }
+      assert(Pos < 4 && "CC out of range");
+      return (3 - Pos);
+    };
+
+    int CCMask = Op0CCMask->getZExtValue();
+    SDValue TrueVal, FalseVal;
+    // Two cases: either exactly one bit is set, or exactly one bit is clear.
+    if (isOneBitSet(CCMask)) {
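+      // The select yields Op0TrueVal only when CC equals this single value.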
+      int CCVal = log2CCMaskToCCVal(CCMask);
+      // select_ccmask (CCVal ^ Op0TrueVal) (SRL IPM CC) CCValid/CCMask CC.
+      TrueVal = DAG.getTargetConstant((CCVal ^ Op0TrueVal->getZExtValue()),
+                                      SDLoc(N), MVT::i32);
+      FalseVal = N->getOperand(1);
+    } else if (isOneBitSet((CCMask ^ 0xf))) {
+      // Only one clear bit.
+      // select_ccmask (SRL IPM CC) (CCVal ^ Op0TrueVal) CCValid/CCMask CC.
+      // The CC value must be placed in FalseVal.
+      int CCVal = log2CCMaskToCCVal(CCMask ^ 0xf);
+      TrueVal = N->getOperand(1);
+      FalseVal = DAG.getTargetConstant((CCVal ^ Op0TrueVal->getZExtValue()),
+                                       SDLoc(N), MVT::i32);
+    } else
+      return SDValue();
+
+    // Check Xor's user's user: if it is select_ccmask or br_ccmask, put it
+    // back on the WorkList so the algorithm can restart from the outer
+    // select_ccmask/br_ccmask and combine with (icmp (select_ccmask)).
+    for (SDUse &XorUse : N->uses()) {
+      auto *CCNode = XorUse.getUser();
+      if (CCNode && (CCNode->getOpcode() == SystemZISD::TM /*||
+                     CCNode->getOpcode() == SystemZISD::ICMP*/)) {
+        for (SDUse &SelectBrUse : CCNode->uses()) {
+          auto *SelectBr = SelectBrUse.getUser();
+          if (SelectBr && (SelectBr->getOpcode() == SystemZISD::SELECT_CCMASK ||
+                           SelectBr->getOpcode() == SystemZISD::BR_CCMASK))
+            DCI.AddToWorklist(SelectBr);
+        }
+      }
+    }
+    return DAG.getNode(
+        SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0), TrueVal,
+        FalseVal, DAG.getTargetConstant(Op0CCValidVal, SDLoc(N), MVT::i32),
+        DAG.getTargetConstant(CCMask, SDLoc(N), MVT::i32), CCReg);
   }
   return SDValue();
 }
@@ -9812,8 +9851,6 @@ SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
     return combineAND(N, DCI);
   case ISD::OR:
     return combineOR(N, DCI);
-  case SystemZISD::TM:
-    return combineTM(N, DCI);
   case ISD::XOR:
     return combineXOR(N, DCI);
   }
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index 987dbe1dd1d34..db17970411978 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -801,7 +801,6 @@ class SystemZTargetLowering : public TargetLowering {
   SDValue combineINTRINSIC(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineAND(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineOR(SDNode *N, DAGCombinerInfo &DCI) const;
-  SDValue combineTM(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineXOR(SDNode *N, DAGCombinerInfo &DCI) const;
 
   SDValue unwrapAddress(SDValue N) const override;



More information about the llvm-commits mailing list