[llvm] 4155cc0 - [SystemZ] Recognize carry/borrow computation

Ulrich Weigand via llvm-commits llvm-commits at lists.llvm.org
Sat Mar 15 10:29:24 PDT 2025


Author: Ulrich Weigand
Date: 2025-03-15T18:28:44+01:00
New Revision: 4155cc0fb3198b1aa4b8e6601980f418c0428cf7

URL: https://github.com/llvm/llvm-project/commit/4155cc0fb3198b1aa4b8e6601980f418c0428cf7
DIFF: https://github.com/llvm/llvm-project/commit/4155cc0fb3198b1aa4b8e6601980f418c0428cf7.diff

LOG: [SystemZ] Recognize carry/borrow computation

Generate code using the VECTOR ADD COMPUTE CARRY and
VECTOR SUBTRACT COMPUTE BORROW INDICATION instructions
to implement open-coded IR with those semantics.

Handles integer vector types as well as i128.

Fixes: https://github.com/llvm/llvm-project/issues/129608

Added: 
    llvm/test/CodeGen/SystemZ/int-cmp-65.ll
    llvm/test/CodeGen/SystemZ/vec-cmp-10.ll

Modified: 
    llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
    llvm/lib/Target/SystemZ/SystemZInstrVector.td
    llvm/lib/Target/SystemZ/SystemZOperators.td

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 78706f20d2cae..895d3c214a03c 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -7343,6 +7343,37 @@ SDValue SystemZTargetLowering::combineZERO_EXTEND(
       }
     }
   }
+  // Recognize patterns for VECTOR SUBTRACT COMPUTE BORROW INDICATION
+  // and VECTOR ADD COMPUTE CARRY for i128:
+  //   (zext (setcc_uge X Y)) --> (VSCBI X Y)
+  //   (zext (setcc_ule Y X)) --> (VSCBI X Y)
+  //   (zext (setcc_ult (add X Y) X/Y)) --> (VACC X Y)
+  //   (zext (setcc_ugt X/Y (add X Y))) --> (VACC X Y)
+  // For vector types, these patterns are recognized in the .td file.
+  if (N0.getOpcode() == ISD::SETCC && isTypeLegal(VT) && VT == MVT::i128 &&
+      N0.getOperand(0).getValueType() == VT) {
+    SDValue Op0 = N0.getOperand(0);
+    SDValue Op1 = N0.getOperand(1);
+    const ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
+    switch (CC) {
+    case ISD::SETULE:
+      std::swap(Op0, Op1);
+      [[fallthrough]];
+    case ISD::SETUGE:
+      return DAG.getNode(SystemZISD::VSCBI, SDLoc(N0), VT, Op0, Op1);
+    case ISD::SETUGT:
+      std::swap(Op0, Op1);
+      [[fallthrough]];
+    case ISD::SETULT:
+      if (Op0->hasOneUse() && Op0->getOpcode() == ISD::ADD &&
+          (Op0->getOperand(0) == Op1 || Op0->getOperand(1) == Op1))
+        return DAG.getNode(SystemZISD::VACC, SDLoc(N0), VT, Op0->getOperand(0),
+                           Op0->getOperand(1));
+      break;
+    default:
+      break;
+    }
+  }
 
   return SDValue();
 }

diff  --git a/llvm/lib/Target/SystemZ/SystemZInstrVector.td b/llvm/lib/Target/SystemZ/SystemZInstrVector.td
index db957bb7c0294..29c92915c2317 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrVector.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrVector.td
@@ -1287,6 +1287,23 @@ let Predicates = [FeatureVectorEnhancements3] in {
             (VMNLQ VR128:$x, VR128:$y)>;
 }
 
+// Instantiate comparison patterns to recognize VACC/VSCBI for TYPE.
+multiclass IntegerComputeCarryOrBorrow<ValueType type,
+                                       Instruction vacc, Instruction vscbi> {
+  let Predicates = [FeatureVector] in {
+    def : Pat<(z_vzext1 (type (z_vicmphl VR128:$x, (add VR128:$x, VR128:$y)))),
+              (vacc VR128:$x, VR128:$y)>;
+    def : Pat<(z_vzext1 (type (z_vicmphl VR128:$y, (add VR128:$x, VR128:$y)))),
+              (vacc VR128:$x, VR128:$y)>;
+    def : Pat<(z_vzext1 (z_vnot (type (z_vicmphl VR128:$y, VR128:$x)))),
+              (vscbi VR128:$x, VR128:$y)>;
+  }
+}
+defm : IntegerComputeCarryOrBorrow<v16i8, VACCB, VSCBIB>;
+defm : IntegerComputeCarryOrBorrow<v8i16, VACCH, VSCBIH>;
+defm : IntegerComputeCarryOrBorrow<v4i32, VACCF, VSCBIF>;
+defm : IntegerComputeCarryOrBorrow<v2i64, VACCG, VSCBIG>;
+
 // Instantiate full-vector shifts.
 multiclass FullVectorShiftOps<SDPatternOperator shift,
                               Instruction sbit, Instruction sbyte> {

diff  --git a/llvm/lib/Target/SystemZ/SystemZOperators.td b/llvm/lib/Target/SystemZ/SystemZOperators.td
index 1cc153b79e289..8d7ee50c08742 100644
--- a/llvm/lib/Target/SystemZ/SystemZOperators.td
+++ b/llvm/lib/Target/SystemZ/SystemZOperators.td
@@ -1058,6 +1058,13 @@ def z_vneg : PatFrag<(ops node:$x), (sub immAllZerosV, node:$x)>;
 // Bitwise negation on vectors.
 def z_vnot : PatFrag<(ops node:$x), (xor node:$x, immAllOnesV)>;
 
+// In-register element-wise zero extension from i1 on vectors.
+def vsplat_imm_eq_1 : PatFrag<(ops), (build_vector), [{
+  APInt Imm;
+  return ISD::isConstantSplatVector(N, Imm) && Imm == 1;
+}]>;
+def z_vzext1 : PatFrag<(ops node:$x), (and node:$x, vsplat_imm_eq_1)>;
+
 // Signed "integer greater than zero" on vectors.
 def z_vicmph_zero : PatFrag<(ops node:$x), (z_vicmph node:$x, immAllZerosV)>;
 

diff  --git a/llvm/test/CodeGen/SystemZ/int-cmp-65.ll b/llvm/test/CodeGen/SystemZ/int-cmp-65.ll
new file mode 100644
index 0000000000000..b06ab3c1fa3d3
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/int-cmp-65.ll
@@ -0,0 +1,87 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; Test usage of VACC/VSCBI.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+define i128 @i128_subc_1(i128 %a, i128 %b) unnamed_addr {
+; CHECK-LABEL: i128_subc_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl %v0, 0(%r4), 3
+; CHECK-NEXT:    vl %v1, 0(%r3), 3
+; CHECK-NEXT:    vscbiq %v0, %v1, %v0
+; CHECK-NEXT:    vst %v0, 0(%r2), 3
+; CHECK-NEXT:    br %r14
+  %cmp = icmp uge i128 %a, %b
+  %ext = zext i1 %cmp to i128
+  ret i128 %ext
+}
+
+define i128 @i128_subc_2(i128 %a, i128 %b) unnamed_addr {
+; CHECK-LABEL: i128_subc_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl %v0, 0(%r3), 3
+; CHECK-NEXT:    vl %v1, 0(%r4), 3
+; CHECK-NEXT:    vscbiq %v0, %v1, %v0
+; CHECK-NEXT:    vst %v0, 0(%r2), 3
+; CHECK-NEXT:    br %r14
+  %cmp = icmp ule i128 %a, %b
+  %ext = zext i1 %cmp to i128
+  ret i128 %ext
+}
+
+define i128 @i128_addc_1(i128 %a, i128 %b) {
+; CHECK-LABEL: i128_addc_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl %v0, 0(%r4), 3
+; CHECK-NEXT:    vl %v1, 0(%r3), 3
+; CHECK-NEXT:    vaccq %v0, %v1, %v0
+; CHECK-NEXT:    vst %v0, 0(%r2), 3
+; CHECK-NEXT:    br %r14
+  %sum = add i128 %a, %b
+  %cmp = icmp ult i128 %sum, %a
+  %ext = zext i1 %cmp to i128
+  ret i128 %ext
+}
+
+define i128 @i128_addc_2(i128 %a, i128 %b) {
+; CHECK-LABEL: i128_addc_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl %v0, 0(%r4), 3
+; CHECK-NEXT:    vl %v1, 0(%r3), 3
+; CHECK-NEXT:    vaccq %v0, %v1, %v0
+; CHECK-NEXT:    vst %v0, 0(%r2), 3
+; CHECK-NEXT:    br %r14
+  %sum = add i128 %a, %b
+  %cmp = icmp ult i128 %sum, %b
+  %ext = zext i1 %cmp to i128
+  ret i128 %ext
+}
+
+define i128 @i128_addc_3(i128 %a, i128 %b) {
+; CHECK-LABEL: i128_addc_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl %v0, 0(%r4), 3
+; CHECK-NEXT:    vl %v1, 0(%r3), 3
+; CHECK-NEXT:    vaccq %v0, %v1, %v0
+; CHECK-NEXT:    vst %v0, 0(%r2), 3
+; CHECK-NEXT:    br %r14
+  %sum = add i128 %a, %b
+  %cmp = icmp ugt i128 %a, %sum
+  %ext = zext i1 %cmp to i128
+  ret i128 %ext
+}
+
+define i128 @i128_addc_4(i128 %a, i128 %b) {
+; CHECK-LABEL: i128_addc_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl %v0, 0(%r4), 3
+; CHECK-NEXT:    vl %v1, 0(%r3), 3
+; CHECK-NEXT:    vaccq %v0, %v1, %v0
+; CHECK-NEXT:    vst %v0, 0(%r2), 3
+; CHECK-NEXT:    br %r14
+  %sum = add i128 %a, %b
+  %cmp = icmp ugt i128 %b, %sum
+  %ext = zext i1 %cmp to i128
+  ret i128 %ext
+}
+

diff  --git a/llvm/test/CodeGen/SystemZ/vec-cmp-10.ll b/llvm/test/CodeGen/SystemZ/vec-cmp-10.ll
new file mode 100644
index 0000000000000..227546150c673
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-cmp-10.ll
@@ -0,0 +1,260 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; Test usage of VACC/VSCBI.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+define <16 x i8> @v16i8_subc_1(<16 x i8> %a, <16 x i8> %b) unnamed_addr {
+; CHECK-LABEL: v16i8_subc_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vscbib %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %cmp = icmp uge <16 x i8> %a, %b
+  %ext = zext <16 x i1> %cmp to <16 x i8>
+  ret <16 x i8> %ext
+}
+
+define <16 x i8> @v16i8_subc_2(<16 x i8> %a, <16 x i8> %b) unnamed_addr {
+; CHECK-LABEL: v16i8_subc_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vscbib %v24, %v26, %v24
+; CHECK-NEXT:    br %r14
+  %cmp = icmp ule <16 x i8> %a, %b
+  %ext = zext <16 x i1> %cmp to <16 x i8>
+  ret <16 x i8> %ext
+}
+
+define <16 x i8> @v16i8_addc_1(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: v16i8_addc_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vaccb %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %sum = add <16 x i8> %a, %b
+  %cmp = icmp ult <16 x i8> %sum, %a
+  %ext = zext <16 x i1> %cmp to <16 x i8>
+  ret <16 x i8> %ext
+}
+
+define <16 x i8> @v16i8_addc_2(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: v16i8_addc_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vaccb %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %sum = add <16 x i8> %a, %b
+  %cmp = icmp ult <16 x i8> %sum, %b
+  %ext = zext <16 x i1> %cmp to <16 x i8>
+  ret <16 x i8> %ext
+}
+
+define <16 x i8> @v16i8_addc_3(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: v16i8_addc_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vaccb %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %sum = add <16 x i8> %a, %b
+  %cmp = icmp ugt <16 x i8> %a, %sum
+  %ext = zext <16 x i1> %cmp to <16 x i8>
+  ret <16 x i8> %ext
+}
+
+define <16 x i8> @v16i8_addc_4(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: v16i8_addc_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vaccb %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %sum = add <16 x i8> %a, %b
+  %cmp = icmp ugt <16 x i8> %b, %sum
+  %ext = zext <16 x i1> %cmp to <16 x i8>
+  ret <16 x i8> %ext
+}
+
+define <8 x i16> @v8i16_subc_1(<8 x i16> %a, <8 x i16> %b) unnamed_addr {
+; CHECK-LABEL: v8i16_subc_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vscbih %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %cmp = icmp uge <8 x i16> %a, %b
+  %ext = zext <8 x i1> %cmp to <8 x i16>
+  ret <8 x i16> %ext
+}
+
+define <8 x i16> @v8i16_subc_2(<8 x i16> %a, <8 x i16> %b) unnamed_addr {
+; CHECK-LABEL: v8i16_subc_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vscbih %v24, %v26, %v24
+; CHECK-NEXT:    br %r14
+  %cmp = icmp ule <8 x i16> %a, %b
+  %ext = zext <8 x i1> %cmp to <8 x i16>
+  ret <8 x i16> %ext
+}
+
+define <8 x i16> @v8i16_addc_1(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: v8i16_addc_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vacch %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %sum = add <8 x i16> %a, %b
+  %cmp = icmp ult <8 x i16> %sum, %a
+  %ext = zext <8 x i1> %cmp to <8 x i16>
+  ret <8 x i16> %ext
+}
+
+define <8 x i16> @v8i16_addc_2(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: v8i16_addc_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vacch %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %sum = add <8 x i16> %a, %b
+  %cmp = icmp ult <8 x i16> %sum, %b
+  %ext = zext <8 x i1> %cmp to <8 x i16>
+  ret <8 x i16> %ext
+}
+
+define <8 x i16> @v8i16_addc_3(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: v8i16_addc_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vacch %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %sum = add <8 x i16> %a, %b
+  %cmp = icmp ugt <8 x i16> %a, %sum
+  %ext = zext <8 x i1> %cmp to <8 x i16>
+  ret <8 x i16> %ext
+}
+
+define <8 x i16> @v8i16_addc_4(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: v8i16_addc_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vacch %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %sum = add <8 x i16> %a, %b
+  %cmp = icmp ugt <8 x i16> %b, %sum
+  %ext = zext <8 x i1> %cmp to <8 x i16>
+  ret <8 x i16> %ext
+}
+
+define <4 x i32> @v4i32_subc_1(<4 x i32> %a, <4 x i32> %b) unnamed_addr {
+; CHECK-LABEL: v4i32_subc_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vscbif %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %cmp = icmp uge <4 x i32> %a, %b
+  %ext = zext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %ext
+}
+
+define <4 x i32> @v4i32_subc_2(<4 x i32> %a, <4 x i32> %b) unnamed_addr {
+; CHECK-LABEL: v4i32_subc_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vscbif %v24, %v26, %v24
+; CHECK-NEXT:    br %r14
+  %cmp = icmp ule <4 x i32> %a, %b
+  %ext = zext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %ext
+}
+
+define <4 x i32> @v4i32_addc_1(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: v4i32_addc_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vaccf %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %sum = add <4 x i32> %a, %b
+  %cmp = icmp ult <4 x i32> %sum, %a
+  %ext = zext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %ext
+}
+
+define <4 x i32> @v4i32_addc_2(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: v4i32_addc_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vaccf %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %sum = add <4 x i32> %a, %b
+  %cmp = icmp ult <4 x i32> %sum, %b
+  %ext = zext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %ext
+}
+
+define <4 x i32> @v4i32_addc_3(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: v4i32_addc_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vaccf %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %sum = add <4 x i32> %a, %b
+  %cmp = icmp ugt <4 x i32> %a, %sum
+  %ext = zext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %ext
+}
+
+define <4 x i32> @v4i32_addc_4(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: v4i32_addc_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vaccf %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %sum = add <4 x i32> %a, %b
+  %cmp = icmp ugt <4 x i32> %b, %sum
+  %ext = zext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %ext
+}
+
+define <2 x i64> @v2i64_subc_1(<2 x i64> %a, <2 x i64> %b) unnamed_addr {
+; CHECK-LABEL: v2i64_subc_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vscbig %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %cmp = icmp uge <2 x i64> %a, %b
+  %ext = zext <2 x i1> %cmp to <2 x i64>
+  ret <2 x i64> %ext
+}
+
+define <2 x i64> @v2i64_subc_2(<2 x i64> %a, <2 x i64> %b) unnamed_addr {
+; CHECK-LABEL: v2i64_subc_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vscbig %v24, %v26, %v24
+; CHECK-NEXT:    br %r14
+  %cmp = icmp ule <2 x i64> %a, %b
+  %ext = zext <2 x i1> %cmp to <2 x i64>
+  ret <2 x i64> %ext
+}
+
+define <2 x i64> @v2i64_addc_1(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: v2i64_addc_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vaccg %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %sum = add <2 x i64> %a, %b
+  %cmp = icmp ult <2 x i64> %sum, %a
+  %ext = zext <2 x i1> %cmp to <2 x i64>
+  ret <2 x i64> %ext
+}
+
+define <2 x i64> @v2i64_addc_2(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: v2i64_addc_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vaccg %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %sum = add <2 x i64> %a, %b
+  %cmp = icmp ult <2 x i64> %sum, %b
+  %ext = zext <2 x i1> %cmp to <2 x i64>
+  ret <2 x i64> %ext
+}
+
+define <2 x i64> @v2i64_addc_3(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: v2i64_addc_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vaccg %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %sum = add <2 x i64> %a, %b
+  %cmp = icmp ugt <2 x i64> %a, %sum
+  %ext = zext <2 x i1> %cmp to <2 x i64>
+  ret <2 x i64> %ext
+}
+
+define <2 x i64> @v2i64_addc_4(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: v2i64_addc_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vaccg %v24, %v24, %v26
+; CHECK-NEXT:    br %r14
+  %sum = add <2 x i64> %a, %b
+  %cmp = icmp ugt <2 x i64> %b, %sum
+  %ext = zext <2 x i1> %cmp to <2 x i64>
+  ret <2 x i64> %ext
+}


        


More information about the llvm-commits mailing list