[llvm] a9018fd - [X86] Add more add/sub carry tests

David Zarzycki via llvm-commits <llvm-commits@lists.llvm.org>
Tue Nov 12 01:37:25 PST 2019


Author: David Zarzycki
Date: 2019-11-12T11:36:59+02:00
New Revision: a9018fddf9ba17e53a35674bf0a93e78382c4c23

URL: https://github.com/llvm/llvm-project/commit/a9018fddf9ba17e53a35674bf0a93e78382c4c23
DIFF: https://github.com/llvm/llvm-project/commit/a9018fddf9ba17e53a35674bf0a93e78382c4c23.diff

LOG: [X86] Add more add/sub carry tests

Preparation for:

https://reviews.llvm.org/D70079
https://reviews.llvm.org/D70077
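
The new functions exercise two spellings of multi-word carry propagation: a
hand-rolled idiom (an add whose carry is recovered by an unsigned compare
against one addend, then widened with zext into the next limb) and chains of
the llvm.uadd.with.overflow / llvm.usub.with.overflow intrinsics. A minimal
sketch of the hand-rolled idiom, reduced to two limbs (the function name and
width are illustrative, not taken from the patch):

  define { i64, i64 } @add_2x64(i64 %a0, i64 %a1, i64 %b0, i64 %b1) {
    %lo  = add i64 %a0, %b0
    %c   = icmp ult i64 %lo, %b0       ; unsigned wrap <=> carry out of limb 0
    %cz  = zext i1 %c to i64
    %hi0 = add i64 %a1, %b1
    %hi  = add i64 %hi0, %cz           ; propagate the carry into limb 1
    %r0  = insertvalue { i64, i64 } undef, i64 %lo, 0
    %r1  = insertvalue { i64, i64 } %r0, i64 %hi, 1
    ret { i64, i64 } %r1
  }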

Added: 
    

Modified: 
    llvm/test/CodeGen/SystemZ/int-uadd-03.ll
    llvm/test/CodeGen/SystemZ/int-usub-03.ll
    llvm/test/CodeGen/X86/addcarry.ll
    llvm/test/CodeGen/X86/subcarry.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/SystemZ/int-uadd-03.ll b/llvm/test/CodeGen/SystemZ/int-uadd-03.ll
index d57f8a84411a..b7b9883ecc92 100644
--- a/llvm/test/CodeGen/SystemZ/int-uadd-03.ll
+++ b/llvm/test/CodeGen/SystemZ/int-uadd-03.ll
@@ -199,7 +199,7 @@ define zeroext i1 @f10(i64 %src, i64 %index, i64 %a, i64 *%res) {
 define zeroext i1 @f11(i32 *%ptr0) {
 ; CHECK-LABEL: f11:
 ; CHECK: brasl %r14, foo@PLT
-; CHECK: algf %r2, 160(%r15)
+; CHECK: algf {{%r[0-9]+}}, 160(%r15)
 ; CHECK: br %r14
   %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
   %ptr2 = getelementptr i32, i32 *%ptr0, i64 4

diff --git a/llvm/test/CodeGen/SystemZ/int-usub-03.ll b/llvm/test/CodeGen/SystemZ/int-usub-03.ll
index 4e5f99fcee25..5e0a947772c5 100644
--- a/llvm/test/CodeGen/SystemZ/int-usub-03.ll
+++ b/llvm/test/CodeGen/SystemZ/int-usub-03.ll
@@ -207,7 +207,7 @@ define zeroext i1 @f10(i64 %src, i64 %index, i64 %a, i64 *%res) {
 define zeroext i1 @f11(i32 *%ptr0) {
 ; CHECK-LABEL: f11:
 ; CHECK: brasl %r14, foo@PLT
-; CHECK: slgf %r2, 160(%r15)
+; CHECK: slgf {{%r[0-9]+}}, 160(%r15)
 ; CHECK: br %r14
   %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
   %ptr2 = getelementptr i32, i32 *%ptr0, i64 4

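
Both SystemZ hunks make the same change: the result register in the CHECK
line is relaxed from the hard-coded %r2 to the FileCheck regex {{%r[0-9]+}},
pinning only the opcode and the memory operand, presumably so the checks
survive any register-allocation shifts caused by the upcoming combines. The
before/after shape of such a de-pinned check:

  ; CHECK: algf %r2, 160(%r15)           <-- fails if another GPR is chosen
  ; CHECK: algf {{%r[0-9]+}}, 160(%r15)  <-- accepts any general-purpose register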
diff --git a/llvm/test/CodeGen/X86/addcarry.ll b/llvm/test/CodeGen/X86/addcarry.ll
index 6c70fee99090..33d2890a7257 100644
--- a/llvm/test/CodeGen/X86/addcarry.ll
+++ b/llvm/test/CodeGen/X86/addcarry.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s
 
+declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)
+
 define i128 @add128(i128 %a, i128 %b) nounwind {
 ; CHECK-LABEL: add128:
 ; CHECK:       # %bb.0: # %entry
@@ -411,3 +413,419 @@ define i128 @addcarry_to_subcarry(i64 %a, i64 %b) {
   %sub2 = add i128 %sum2, %notb128
   ret i128 %sub2
 }
+
+%struct.U320 = type { [5 x i64] }
+
+define i32 @add_U320_without_i128_add(%struct.U320* nocapture dereferenceable(40) %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5) {
+; CHECK-LABEL: add_U320_without_i128_add:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %r14
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    .cfi_def_cfa_offset 24
+; CHECK-NEXT:    .cfi_offset %rbx, -24
+; CHECK-NEXT:    .cfi_offset %r14, -16
+; CHECK-NEXT:    movq 16(%rdi), %rax
+; CHECK-NEXT:    leaq (%rax,%rcx), %r10
+; CHECK-NEXT:    addq %rsi, (%rdi)
+; CHECK-NEXT:    adcq %rdx, 8(%rdi)
+; CHECK-NEXT:    movq %rax, %rdx
+; CHECK-NEXT:    adcq %rcx, %rdx
+; CHECK-NEXT:    movq 24(%rdi), %r11
+; CHECK-NEXT:    leaq (%r8,%r11), %r14
+; CHECK-NEXT:    xorl %ebx, %ebx
+; CHECK-NEXT:    cmpq %r10, %rdx
+; CHECK-NEXT:    setb %bl
+; CHECK-NEXT:    addq %rcx, %rax
+; CHECK-NEXT:    adcq %r14, %rbx
+; CHECK-NEXT:    movq 32(%rdi), %r10
+; CHECK-NEXT:    leaq (%r9,%r10), %rcx
+; CHECK-NEXT:    xorl %esi, %esi
+; CHECK-NEXT:    cmpq %r14, %rbx
+; CHECK-NEXT:    setb %sil
+; CHECK-NEXT:    addq %r11, %r8
+; CHECK-NEXT:    adcq %rcx, %rsi
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    cmpq %rcx, %rsi
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    addq %r10, %r9
+; CHECK-NEXT:    movq %rdx, 16(%rdi)
+; CHECK-NEXT:    movq %rbx, 24(%rdi)
+; CHECK-NEXT:    movq %rsi, 32(%rdi)
+; CHECK-NEXT:    adcl $0, %eax
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    popq %r14
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
+  %7 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 0
+  %8 = load i64, i64* %7, align 8
+  %9 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 1
+  %10 = load i64, i64* %9, align 8
+  %11 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 2
+  %12 = load i64, i64* %11, align 8
+  %13 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 3
+  %14 = load i64, i64* %13, align 8
+  %15 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 4
+  %16 = load i64, i64* %15, align 8
+  %17 = add i64 %8, %1
+  %18 = add i64 %10, %2
+  %19 = icmp ult i64 %17, %1
+  %20 = zext i1 %19 to i64
+  %21 = add i64 %18, %20
+  %22 = add i64 %12, %3
+  %23 = icmp ult i64 %18, %10
+  %24 = zext i1 %23 to i64
+  %25 = icmp ult i64 %21, %18
+  %26 = zext i1 %25 to i64
+  %27 = add i64 %22, %24
+  %28 = add i64 %27, %26
+  %29 = add i64 %14, %4
+  %30 = icmp ult i64 %22, %12
+  %31 = zext i1 %30 to i64
+  %32 = icmp ult i64 %28, %22
+  %33 = zext i1 %32 to i64
+  %34 = add i64 %29, %31
+  %35 = add i64 %34, %33
+  %36 = add i64 %16, %5
+  %37 = icmp ult i64 %29, %14
+  %38 = zext i1 %37 to i64
+  %39 = icmp ult i64 %35, %29
+  %40 = zext i1 %39 to i64
+  %41 = add i64 %36, %38
+  %42 = add i64 %41, %40
+  store i64 %17, i64* %7, align 8
+  store i64 %21, i64* %9, align 8
+  store i64 %28, i64* %11, align 8
+  store i64 %35, i64* %13, align 8
+  store i64 %42, i64* %15, align 8
+  %43 = icmp ult i64 %36, %16
+  %44 = zext i1 %43 to i32
+  %45 = icmp ult i64 %42, %36
+  %46 = zext i1 %45 to i32
+  %47 = add nuw nsw i32 %46, %44
+  ret i32 %47
+}
+
+define i32 @add_U320_without_i128_or(%struct.U320* nocapture dereferenceable(40) %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5) {
+; CHECK-LABEL: add_U320_without_i128_or:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset %rbx, -16
+; CHECK-NEXT:    addq 8(%rdi), %rdx
+; CHECK-NEXT:    setb %r10b
+; CHECK-NEXT:    addq %rsi, (%rdi)
+; CHECK-NEXT:    adcq $0, %rdx
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    addq 16(%rdi), %rcx
+; CHECK-NEXT:    setb %r11b
+; CHECK-NEXT:    orb %r10b, %al
+; CHECK-NEXT:    movzbl %al, %ebx
+; CHECK-NEXT:    addq %rcx, %rbx
+; CHECK-NEXT:    setb %cl
+; CHECK-NEXT:    addq 24(%rdi), %r8
+; CHECK-NEXT:    setb %r10b
+; CHECK-NEXT:    orb %r11b, %cl
+; CHECK-NEXT:    movzbl %cl, %esi
+; CHECK-NEXT:    addq %r8, %rsi
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    addq 32(%rdi), %r9
+; CHECK-NEXT:    setb %r8b
+; CHECK-NEXT:    orb %r10b, %al
+; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    addq %r9, %rax
+; CHECK-NEXT:    setb %cl
+; CHECK-NEXT:    movq %rdx, 8(%rdi)
+; CHECK-NEXT:    movq %rbx, 16(%rdi)
+; CHECK-NEXT:    movq %rsi, 24(%rdi)
+; CHECK-NEXT:    movq %rax, 32(%rdi)
+; CHECK-NEXT:    orb %r8b, %cl
+; CHECK-NEXT:    movzbl %cl, %eax
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
+  %7 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 0
+  %8 = load i64, i64* %7, align 8
+  %9 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 1
+  %10 = load i64, i64* %9, align 8
+  %11 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 2
+  %12 = load i64, i64* %11, align 8
+  %13 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 3
+  %14 = load i64, i64* %13, align 8
+  %15 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 4
+  %16 = load i64, i64* %15, align 8
+  %17 = add i64 %8, %1
+  %18 = add i64 %10, %2
+  %19 = icmp ult i64 %17, %1
+  %20 = zext i1 %19 to i64
+  %21 = add i64 %18, %20
+  %22 = add i64 %12, %3
+  %23 = icmp ult i64 %18, %10
+  %24 = icmp ult i64 %21, %18
+  %25 = or i1 %23, %24
+  %26 = zext i1 %25 to i64
+  %27 = add i64 %22, %26
+  %28 = add i64 %14, %4
+  %29 = icmp ult i64 %22, %12
+  %30 = icmp ult i64 %27, %22
+  %31 = or i1 %29, %30
+  %32 = zext i1 %31 to i64
+  %33 = add i64 %28, %32
+  %34 = add i64 %16, %5
+  %35 = icmp ult i64 %28, %14
+  %36 = icmp ult i64 %33, %28
+  %37 = or i1 %35, %36
+  %38 = zext i1 %37 to i64
+  %39 = add i64 %34, %38
+  store i64 %17, i64* %7, align 8
+  store i64 %21, i64* %9, align 8
+  store i64 %27, i64* %11, align 8
+  store i64 %33, i64* %13, align 8
+  store i64 %39, i64* %15, align 8
+  %40 = icmp ult i64 %34, %16
+  %41 = icmp ult i64 %39, %34
+  %42 = or i1 %40, %41
+  %43 = zext i1 %42 to i32
+  ret i32 %43
+}
+
+define i32 @add_U320_without_i128_xor(%struct.U320* nocapture dereferenceable(40) %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5) {
+; CHECK-LABEL: add_U320_without_i128_xor:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset %rbx, -16
+; CHECK-NEXT:    addq 8(%rdi), %rdx
+; CHECK-NEXT:    setb %r10b
+; CHECK-NEXT:    addq %rsi, (%rdi)
+; CHECK-NEXT:    adcq $0, %rdx
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    addq 16(%rdi), %rcx
+; CHECK-NEXT:    setb %r11b
+; CHECK-NEXT:    xorb %r10b, %al
+; CHECK-NEXT:    movzbl %al, %ebx
+; CHECK-NEXT:    addq %rcx, %rbx
+; CHECK-NEXT:    setb %cl
+; CHECK-NEXT:    addq 24(%rdi), %r8
+; CHECK-NEXT:    setb %r10b
+; CHECK-NEXT:    xorb %r11b, %cl
+; CHECK-NEXT:    movzbl %cl, %esi
+; CHECK-NEXT:    addq %r8, %rsi
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    addq 32(%rdi), %r9
+; CHECK-NEXT:    setb %r8b
+; CHECK-NEXT:    xorb %r10b, %al
+; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    addq %r9, %rax
+; CHECK-NEXT:    setb %cl
+; CHECK-NEXT:    movq %rdx, 8(%rdi)
+; CHECK-NEXT:    movq %rbx, 16(%rdi)
+; CHECK-NEXT:    movq %rsi, 24(%rdi)
+; CHECK-NEXT:    movq %rax, 32(%rdi)
+; CHECK-NEXT:    xorb %r8b, %cl
+; CHECK-NEXT:    movzbl %cl, %eax
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
+  %7 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 0
+  %8 = load i64, i64* %7, align 8
+  %9 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 1
+  %10 = load i64, i64* %9, align 8
+  %11 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 2
+  %12 = load i64, i64* %11, align 8
+  %13 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 3
+  %14 = load i64, i64* %13, align 8
+  %15 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 4
+  %16 = load i64, i64* %15, align 8
+  %17 = add i64 %8, %1
+  %18 = add i64 %10, %2
+  %19 = icmp ult i64 %17, %1
+  %20 = zext i1 %19 to i64
+  %21 = add i64 %18, %20
+  %22 = add i64 %12, %3
+  %23 = icmp ult i64 %18, %10
+  %24 = icmp ult i64 %21, %18
+  %25 = xor i1 %23, %24
+  %26 = zext i1 %25 to i64
+  %27 = add i64 %22, %26
+  %28 = add i64 %14, %4
+  %29 = icmp ult i64 %22, %12
+  %30 = icmp ult i64 %27, %22
+  %31 = xor i1 %29, %30
+  %32 = zext i1 %31 to i64
+  %33 = add i64 %28, %32
+  %34 = add i64 %16, %5
+  %35 = icmp ult i64 %28, %14
+  %36 = icmp ult i64 %33, %28
+  %37 = xor i1 %35, %36
+  %38 = zext i1 %37 to i64
+  %39 = add i64 %34, %38
+  store i64 %17, i64* %7, align 8
+  store i64 %21, i64* %9, align 8
+  store i64 %27, i64* %11, align 8
+  store i64 %33, i64* %13, align 8
+  store i64 %39, i64* %15, align 8
+  %40 = icmp ult i64 %34, %16
+  %41 = icmp ult i64 %39, %34
+  %42 = xor i1 %40, %41
+  %43 = zext i1 %42 to i32
+  ret i32 %43
+}
+
+define void @add_U320_without_i128_or_no_ret(%struct.U320* nocapture dereferenceable(40) %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5) {
+; CHECK-LABEL: add_U320_without_i128_or_no_ret:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addq 8(%rdi), %rdx
+; CHECK-NEXT:    setb %r10b
+; CHECK-NEXT:    addq %rsi, (%rdi)
+; CHECK-NEXT:    adcq $0, %rdx
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    addq 16(%rdi), %rcx
+; CHECK-NEXT:    setb %r11b
+; CHECK-NEXT:    orb %r10b, %al
+; CHECK-NEXT:    movzbl %al, %esi
+; CHECK-NEXT:    addq %rcx, %rsi
+; CHECK-NEXT:    setb %cl
+; CHECK-NEXT:    addq 24(%rdi), %r8
+; CHECK-NEXT:    setb %r10b
+; CHECK-NEXT:    orb %r11b, %cl
+; CHECK-NEXT:    movzbl %cl, %ecx
+; CHECK-NEXT:    addq %r8, %rcx
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    addq 32(%rdi), %r9
+; CHECK-NEXT:    orb %r10b, %al
+; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    addq %r9, %rax
+; CHECK-NEXT:    movq %rdx, 8(%rdi)
+; CHECK-NEXT:    movq %rsi, 16(%rdi)
+; CHECK-NEXT:    movq %rcx, 24(%rdi)
+; CHECK-NEXT:    movq %rax, 32(%rdi)
+; CHECK-NEXT:    retq
+  %7 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 0
+  %8 = load i64, i64* %7, align 8
+  %9 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 1
+  %10 = load i64, i64* %9, align 8
+  %11 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 2
+  %12 = load i64, i64* %11, align 8
+  %13 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 3
+  %14 = load i64, i64* %13, align 8
+  %15 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 4
+  %16 = load i64, i64* %15, align 8
+  %17 = add i64 %8, %1
+  %18 = add i64 %10, %2
+  %19 = icmp ult i64 %17, %1
+  %20 = zext i1 %19 to i64
+  %21 = add i64 %18, %20
+  %22 = add i64 %12, %3
+  %23 = icmp ult i64 %18, %10
+  %24 = icmp ult i64 %21, %18
+  %25 = or i1 %23, %24
+  %26 = zext i1 %25 to i64
+  %27 = add i64 %22, %26
+  %28 = add i64 %14, %4
+  %29 = icmp ult i64 %22, %12
+  %30 = icmp ult i64 %27, %22
+  %31 = or i1 %29, %30
+  %32 = zext i1 %31 to i64
+  %33 = add i64 %28, %32
+  %34 = add i64 %16, %5
+  %35 = icmp ult i64 %28, %14
+  %36 = icmp ult i64 %33, %28
+  %37 = or i1 %35, %36
+  %38 = zext i1 %37 to i64
+  %39 = add i64 %34, %38
+  store i64 %17, i64* %7, align 8
+  store i64 %21, i64* %9, align 8
+  store i64 %27, i64* %11, align 8
+  store i64 %33, i64* %13, align 8
+  store i64 %39, i64* %15, align 8
+  ret void
+}
+
+define i32 @add_U320_uaddo(%struct.U320* nocapture dereferenceable(40) %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5) {
+; CHECK-LABEL: add_U320_uaddo:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addq 8(%rdi), %rdx
+; CHECK-NEXT:    setb %r10b
+; CHECK-NEXT:    addq %rsi, (%rdi)
+; CHECK-NEXT:    adcq $0, %rdx
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    orb %r10b, %al
+; CHECK-NEXT:    movzbl %al, %esi
+; CHECK-NEXT:    addq 16(%rdi), %rcx
+; CHECK-NEXT:    setb %r10b
+; CHECK-NEXT:    addq %rsi, %rcx
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    orb %r10b, %al
+; CHECK-NEXT:    movzbl %al, %esi
+; CHECK-NEXT:    addq 24(%rdi), %r8
+; CHECK-NEXT:    setb %r10b
+; CHECK-NEXT:    addq %rsi, %r8
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    orb %r10b, %al
+; CHECK-NEXT:    movzbl %al, %esi
+; CHECK-NEXT:    addq 32(%rdi), %r9
+; CHECK-NEXT:    setb %r10b
+; CHECK-NEXT:    addq %rsi, %r9
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    orb %r10b, %al
+; CHECK-NEXT:    movq %rdx, 8(%rdi)
+; CHECK-NEXT:    movq %rcx, 16(%rdi)
+; CHECK-NEXT:    movq %r8, 24(%rdi)
+; CHECK-NEXT:    movq %r9, 32(%rdi)
+; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    retq
+  %7 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 0
+  %8 = load i64, i64* %7, align 8
+  %9 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 1
+  %10 = load i64, i64* %9, align 8
+  %11 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 2
+  %12 = load i64, i64* %11, align 8
+  %13 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 3
+  %14 = load i64, i64* %13, align 8
+  %15 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 4
+  %16 = load i64, i64* %15, align 8
+  %17 = tail call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %8, i64 %1)
+  %18 = extractvalue { i64, i1 } %17, 1
+  %19 = extractvalue { i64, i1 } %17, 0
+  %20 = zext i1 %18 to i64
+  %21 = tail call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %10, i64 %2)
+  %22 = extractvalue { i64, i1 } %21, 1
+  %23 = extractvalue { i64, i1 } %21, 0
+  %24 = tail call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %23, i64 %20)
+  %25 = extractvalue { i64, i1 } %24, 1
+  %26 = extractvalue { i64, i1 } %24, 0
+  %27 = or i1 %22, %25
+  %28 = zext i1 %27 to i64
+  %29 = tail call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %12, i64 %3)
+  %30 = extractvalue { i64, i1 } %29, 1
+  %31 = extractvalue { i64, i1 } %29, 0
+  %32 = tail call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %31, i64 %28)
+  %33 = extractvalue { i64, i1 } %32, 1
+  %34 = extractvalue { i64, i1 } %32, 0
+  %35 = or i1 %30, %33
+  %36 = zext i1 %35 to i64
+  %37 = tail call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %14, i64 %4)
+  %38 = extractvalue { i64, i1 } %37, 1
+  %39 = extractvalue { i64, i1 } %37, 0
+  %40 = tail call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %39, i64 %36)
+  %41 = extractvalue { i64, i1 } %40, 1
+  %42 = extractvalue { i64, i1 } %40, 0
+  %43 = or i1 %38, %41
+  %44 = zext i1 %43 to i64
+  %45 = tail call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %16, i64 %5)
+  %46 = extractvalue { i64, i1 } %45, 1
+  %47 = extractvalue { i64, i1 } %45, 0
+  %48 = tail call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %47, i64 %44)
+  %49 = extractvalue { i64, i1 } %48, 1
+  %50 = extractvalue { i64, i1 } %48, 0
+  %51 = or i1 %46, %49
+  store i64 %19, i64* %7, align 8
+  store i64 %26, i64* %9, align 8
+  store i64 %34, i64* %11, align 8
+  store i64 %42, i64* %13, align 8
+  store i64 %50, i64* %15, align 8
+  %52 = zext i1 %51 to i32
+  ret i32 %52
+}
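
In the uaddo variant each limb merges its two overflow flags with a plain or.
This is sound because the incoming carry is 0 or 1: if the limb add wraps, its
truncated sum is at most 2^64 - 2, so adding a 1-bit carry cannot wrap again,
and the two flags are mutually exclusive. The same fact is why the _add, _or,
and _xor variants above all compute the same final carry. One limb of the
chain as a reduced sketch (not taken from the patch):

  declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)

  define { i64, i1 } @limb_add(i64 %a, i64 %b, i1 %cin) {
    %cz = zext i1 %cin to i64
    %s1 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
    %v1 = extractvalue { i64, i1 } %s1, 0
    %o1 = extractvalue { i64, i1 } %s1, 1
    %s2 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %v1, i64 %cz)
    %v2 = extractvalue { i64, i1 } %s2, 0
    %o2 = extractvalue { i64, i1 } %s2, 1
    %cout = or i1 %o1, %o2               ; at most one of the adds can wrap
    %r0 = insertvalue { i64, i1 } undef, i64 %v2, 0
    %r1 = insertvalue { i64, i1 } %r0, i1 %cout, 1
    ret { i64, i1 } %r1
  }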

diff --git a/llvm/test/CodeGen/X86/subcarry.ll b/llvm/test/CodeGen/X86/subcarry.ll
index 75ee6ef5921b..745f018e9f04 100644
--- a/llvm/test/CodeGen/X86/subcarry.ll
+++ b/llvm/test/CodeGen/X86/subcarry.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s
 
+declare { i64, i1 } @llvm.usub.with.overflow.i64(i64, i64)
+
 define i128 @sub128(i128 %a, i128 %b) nounwind {
 ; CHECK-LABEL: sub128:
 ; CHECK:       # %bb.0: # %entry
@@ -87,7 +89,7 @@ entry:
   ret %S %31
 }
 
-define %S @sub(%S* nocapture readonly %this, %S %arg.b) local_unnamed_addr {
+define %S @sub(%S* nocapture readonly %this, %S %arg.b) {
 ; CHECK-LABEL: sub:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movq %rdi, %rax
@@ -183,3 +185,202 @@ define i64 @sub_from_carry(i64 %x, i64 %y, i64* %valout, i64 %z) {
   %res = sub i64 %carry, %z
   ret i64 %res
 }
+
+%struct.U320 = type { [5 x i64] }
+
+define i32 @sub_U320_without_i128_or(%struct.U320* nocapture dereferenceable(40) %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5) {
+; CHECK-LABEL: sub_U320_without_i128_or:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %r14
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    .cfi_def_cfa_offset 24
+; CHECK-NEXT:    .cfi_offset %rbx, -24
+; CHECK-NEXT:    .cfi_offset %r14, -16
+; CHECK-NEXT:    movq 8(%rdi), %r14
+; CHECK-NEXT:    movq 16(%rdi), %r10
+; CHECK-NEXT:    movq 24(%rdi), %r11
+; CHECK-NEXT:    movq 32(%rdi), %rbx
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    subq %rsi, (%rdi)
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    subq %rdx, %r14
+; CHECK-NEXT:    setb %dl
+; CHECK-NEXT:    subq %rax, %r14
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    subq %rcx, %r10
+; CHECK-NEXT:    setb %cl
+; CHECK-NEXT:    orb %dl, %al
+; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    subq %rax, %r10
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    subq %r8, %r11
+; CHECK-NEXT:    setb %dl
+; CHECK-NEXT:    orb %cl, %al
+; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    subq %rax, %r11
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    subq %r9, %rbx
+; CHECK-NEXT:    setb %cl
+; CHECK-NEXT:    orb %dl, %al
+; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    subq %rax, %rbx
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    movq %r14, 8(%rdi)
+; CHECK-NEXT:    movq %r10, 16(%rdi)
+; CHECK-NEXT:    movq %r11, 24(%rdi)
+; CHECK-NEXT:    movq %rbx, 32(%rdi)
+; CHECK-NEXT:    orb %cl, %al
+; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    popq %r14
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
+  %7 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 0
+  %8 = load i64, i64* %7, align 8
+  %9 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 1
+  %10 = load i64, i64* %9, align 8
+  %11 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 2
+  %12 = load i64, i64* %11, align 8
+  %13 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 3
+  %14 = load i64, i64* %13, align 8
+  %15 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 4
+  %16 = load i64, i64* %15, align 8
+  %17 = sub i64 %8, %1
+  %18 = sub i64 %10, %2
+  %19 = icmp ult i64 %8, %1
+  %20 = zext i1 %19 to i64
+  %21 = sub i64 %18, %20
+  %22 = sub i64 %12, %3
+  %23 = icmp ult i64 %10, %2
+  %24 = icmp ult i64 %18, %20
+  %25 = or i1 %23, %24
+  %26 = zext i1 %25 to i64
+  %27 = sub i64 %22, %26
+  %28 = sub i64 %14, %4
+  %29 = icmp ult i64 %12, %3
+  %30 = icmp ult i64 %22, %26
+  %31 = or i1 %29, %30
+  %32 = zext i1 %31 to i64
+  %33 = sub i64 %28, %32
+  %34 = sub i64 %16, %5
+  %35 = icmp ult i64 %14, %4
+  %36 = icmp ult i64 %28, %32
+  %37 = or i1 %35, %36
+  %38 = zext i1 %37 to i64
+  %39 = sub i64 %34, %38
+  store i64 %17, i64* %7, align 8
+  store i64 %21, i64* %9, align 8
+  store i64 %27, i64* %11, align 8
+  store i64 %33, i64* %13, align 8
+  store i64 %39, i64* %15, align 8
+  %40 = icmp ult i64 %16, %5
+  %41 = icmp ult i64 %34, %38
+  %42 = or i1 %40, %41
+  %43 = zext i1 %42 to i32
+  ret i32 %43
+}
+
+define i32 @sub_U320_usubo(%struct.U320* nocapture dereferenceable(40) %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5) {
+; CHECK-LABEL: sub_U320_usubo:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %r14
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    .cfi_def_cfa_offset 24
+; CHECK-NEXT:    .cfi_offset %rbx, -24
+; CHECK-NEXT:    .cfi_offset %r14, -16
+; CHECK-NEXT:    movq 8(%rdi), %r14
+; CHECK-NEXT:    movq 16(%rdi), %r10
+; CHECK-NEXT:    movq 24(%rdi), %r11
+; CHECK-NEXT:    movq 32(%rdi), %rbx
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    subq %rsi, (%rdi)
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    subq %rdx, %r14
+; CHECK-NEXT:    setb %dl
+; CHECK-NEXT:    subq %rax, %r14
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    orb %dl, %al
+; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    subq %rcx, %r10
+; CHECK-NEXT:    setb %cl
+; CHECK-NEXT:    subq %rax, %r10
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    orb %cl, %al
+; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    subq %r8, %r11
+; CHECK-NEXT:    setb %cl
+; CHECK-NEXT:    subq %rax, %r11
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    orb %cl, %al
+; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    subq %r9, %rbx
+; CHECK-NEXT:    setb %cl
+; CHECK-NEXT:    subq %rax, %rbx
+; CHECK-NEXT:    setb %al
+; CHECK-NEXT:    orb %cl, %al
+; CHECK-NEXT:    movq %r14, 8(%rdi)
+; CHECK-NEXT:    movq %r10, 16(%rdi)
+; CHECK-NEXT:    movq %r11, 24(%rdi)
+; CHECK-NEXT:    movq %rbx, 32(%rdi)
+; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    popq %r14
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
+  %7 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 0
+  %8 = load i64, i64* %7, align 8
+  %9 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 1
+  %10 = load i64, i64* %9, align 8
+  %11 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 2
+  %12 = load i64, i64* %11, align 8
+  %13 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 3
+  %14 = load i64, i64* %13, align 8
+  %15 = getelementptr inbounds %struct.U320, %struct.U320* %0, i64 0, i32 0, i64 4
+  %16 = load i64, i64* %15, align 8
+  %17 = tail call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %8, i64 %1)
+  %18 = extractvalue { i64, i1 } %17, 1
+  %19 = extractvalue { i64, i1 } %17, 0
+  %20 = zext i1 %18 to i64
+  %21 = tail call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %10, i64 %2)
+  %22 = extractvalue { i64, i1 } %21, 1
+  %23 = extractvalue { i64, i1 } %21, 0
+  %24 = tail call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %23, i64 %20)
+  %25 = extractvalue { i64, i1 } %24, 1
+  %26 = extractvalue { i64, i1 } %24, 0
+  %27 = or i1 %22, %25
+  %28 = zext i1 %27 to i64
+  %29 = tail call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %12, i64 %3)
+  %30 = extractvalue { i64, i1 } %29, 1
+  %31 = extractvalue { i64, i1 } %29, 0
+  %32 = tail call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %31, i64 %28)
+  %33 = extractvalue { i64, i1 } %32, 1
+  %34 = extractvalue { i64, i1 } %32, 0
+  %35 = or i1 %30, %33
+  %36 = zext i1 %35 to i64
+  %37 = tail call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %14, i64 %4)
+  %38 = extractvalue { i64, i1 } %37, 1
+  %39 = extractvalue { i64, i1 } %37, 0
+  %40 = tail call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %39, i64 %36)
+  %41 = extractvalue { i64, i1 } %40, 1
+  %42 = extractvalue { i64, i1 } %40, 0
+  %43 = or i1 %38, %41
+  %44 = zext i1 %43 to i64
+  %45 = tail call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %16, i64 %5)
+  %46 = extractvalue { i64, i1 } %45, 1
+  %47 = extractvalue { i64, i1 } %45, 0
+  %48 = tail call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %47, i64 %44)
+  %49 = extractvalue { i64, i1 } %48, 1
+  %50 = extractvalue { i64, i1 } %48, 0
+  %51 = or i1 %46, %49
+  store i64 %19, i64* %7, align 8
+  store i64 %26, i64* %9, align 8
+  store i64 %34, i64* %11, align 8
+  store i64 %42, i64* %13, align 8
+  store i64 %50, i64* %15, align 8
+  %52 = zext i1 %51 to i32
+  ret i32 %52
+}
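
The borrow case is symmetric: if the limb subtract wraps (minuend below
subtrahend), the truncated difference is at least 1, so subtracting the 0-or-1
borrow cannot wrap again, and the two usubo flags can likewise be merged with
or. Mirrored one-limb sketch (not taken from the patch):

  declare { i64, i1 } @llvm.usub.with.overflow.i64(i64, i64)

  define { i64, i1 } @limb_sub(i64 %a, i64 %b, i1 %bin) {
    %bz = zext i1 %bin to i64
    %s1 = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
    %v1 = extractvalue { i64, i1 } %s1, 0
    %o1 = extractvalue { i64, i1 } %s1, 1
    %s2 = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %v1, i64 %bz)
    %v2 = extractvalue { i64, i1 } %s2, 0
    %o2 = extractvalue { i64, i1 } %s2, 1
    %bout = or i1 %o1, %o2               ; at most one of the subtracts can wrap
    %r0 = insertvalue { i64, i1 } undef, i64 %v2, 0
    %r1 = insertvalue { i64, i1 } %r0, i1 %bout, 1
    ret { i64, i1 } %r1
  }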
