[llvm] db17ebd - [AMDGPU][GlobalISel] Add end-to-end IR tests for add/sub with overflow

Abinav Puthan Purayil via llvm-commits <llvm-commits at lists.llvm.org>
Thu Mar 31 09:17:14 PDT 2022


Author: Abinav Puthan Purayil
Date: 2022-03-31T21:46:34+05:30
New Revision: db17ebd593f67a9645aaabbbbf9f74afdfc1b6f1

URL: https://github.com/llvm/llvm-project/commit/db17ebd593f67a9645aaabbbbf9f74afdfc1b6f1
DIFF: https://github.com/llvm/llvm-project/commit/db17ebd593f67a9645aaabbbbf9f74afdfc1b6f1.diff

LOG: [AMDGPU][GlobalISel] Add end-to-end IR tests for add/sub with overflow

Differential Revision: https://reviews.llvm.org/D122818
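
For context, every test below follows the same shape: call the *.with.overflow intrinsic, extract both struct elements, zero-extend the i1 overflow flag, and fold it back into the arithmetic result so that neither value is dead before instruction selection. A minimal sketch of that pattern (illustrative only; the committed tests below are the authoritative versions):

declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32)

define i32 @uaddo_pattern(i32 %a, i32 %b) {
  ; The intrinsic returns both the wrapped sum and the carry-out bit.
  %uaddo = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %add = extractvalue {i32, i1} %uaddo, 0
  %of = extractvalue {i32, i1} %uaddo, 1
  ; Folding the carry back into the sum keeps both results live.
  %of.zext = zext i1 %of to i32
  %ret = add i32 %add, %of.zext
  ret i32 %ret
}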

Added: 
    llvm/test/CodeGen/AMDGPU/GlobalISel/addo.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/subo.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/addo.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/addo.ll
new file mode 100644
index 0000000000000..62468ad8b4376
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/addo.ll
@@ -0,0 +1,1141 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+
+define i32 @v_uaddo_i32(i32 %a, i32 %b) {
+; GFX7-LABEL: v_uaddo_i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddo_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddo_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v1
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT:    v_add_u32_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %uaddo = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %add = extractvalue {i32, i1} %uaddo, 0
+  %of = extractvalue {i32, i1} %uaddo, 1
+  %of.zext = zext i1 %of to i32
+  %ret = add i32 %add, %of.zext
+  ret i32 %ret
+}
+
+define i64 @v_uaddo_i64(i64 %a, i64 %b) {
+; GFX7-LABEL: v_uaddo_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX7-NEXT:    v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddo_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddo_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %uaddo = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %add = extractvalue {i64, i1} %uaddo, 0
+  %of = extractvalue {i64, i1} %uaddo, 1
+  %of.zext = zext i1 %of to i64
+  %ret = add i64 %add, %of.zext
+  ret i64 %ret
+}
+
+define i8 @v_uaddo_i8(i8 %a, i8 %b) {
+; GFX7-LABEL: v_uaddo_i8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_movk_i32 s4, 0xff
+; GFX7-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX7-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX7-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddo_i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_movk_i32 s4, 0xff
+; GFX8-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX8-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddo_i8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT:    v_cmp_ne_u32_sdwa s[4:5], v0, v0 src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX9-NEXT:    v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %uaddo = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
+  %add = extractvalue {i8, i1} %uaddo, 0
+  %of = extractvalue {i8, i1} %uaddo, 1
+  %of.zext = zext i1 %of to i8
+  %ret = add i8 %add, %of.zext
+  ret i8 %ret
+}
+
+define i7 @v_uaddo_i7(i7 %a, i7 %b) {
+; GFX7-LABEL: v_uaddo_i7:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_movk_i32 s4, 0x7f
+; GFX7-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX7-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX7-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddo_i7:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_movk_i32 s4, 0x7f
+; GFX8-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX8-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddo_i7:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_movk_i32 s4, 0x7f
+; GFX9-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX9-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX9-NEXT:    v_add_u32_e32 v0, v0, v1
+; GFX9-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT:    v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %uaddo = call {i7, i1} @llvm.uadd.with.overflow.i7(i7 %a, i7 %b)
+  %add = extractvalue {i7, i1} %uaddo, 0
+  %of = extractvalue {i7, i1} %uaddo, 1
+  %of.zext = zext i1 %of to i7
+  %ret = add i7 %add, %of.zext
+  ret i7 %ret
+}
+
+define <2 x i32> @v_uaddo_v2i32(<2 x i32> %a, <2 x i32> %b) {
+; GFX7-LABEL: v_uaddo_v2i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT:    v_cmp_lt_u32_e32 vcc, v0, v2
+; GFX7-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX7-NEXT:    v_cmp_lt_u32_e32 vcc, v1, v3
+; GFX7-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddo_v2i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
+; GFX8-NEXT:    v_cmp_lt_u32_e32 vcc, v0, v2
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT:    v_cmp_lt_u32_e32 vcc, v1, v3
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddo_v2i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v0, v0, v2
+; GFX9-NEXT:    v_add_u32_e32 v1, v1, v3
+; GFX9-NEXT:    v_cmp_lt_u32_e32 vcc, v0, v2
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT:    v_cmp_lt_u32_e32 vcc, v1, v3
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX9-NEXT:    v_add_u32_e32 v0, v0, v2
+; GFX9-NEXT:    v_add_u32_e32 v1, v1, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %uaddo = call {<2 x i32>, <2 x i1>} @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)
+  %add = extractvalue {<2 x i32>, <2 x i1>} %uaddo, 0
+  %of = extractvalue {<2 x i32>, <2 x i1>} %uaddo, 1
+  %of.zext = zext <2 x i1> %of to <2 x i32>
+  %ret = add <2 x i32> %add, %of.zext
+  ret <2 x i32> %ret
+}
+
+define i32 @v_saddo_i32(i32 %a, i32 %b) {
+; GFX7-LABEL: v_saddo_i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v0, v1
+; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v2, v0
+; GFX7-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v1
+; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddo_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v0, v1
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v2, v0
+; GFX8-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v1
+; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v2, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddo_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v2, v0, v1
+; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, v2, v0
+; GFX9-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v1
+; GFX9-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX9-NEXT:    v_add_u32_e32 v0, v2, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %saddo = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %add = extractvalue {i32, i1} %saddo, 0
+  %of = extractvalue {i32, i1} %saddo, 1
+  %of.zext = zext i1 %of to i32
+  %ret = add i32 %add, %of.zext
+  ret i32 %ret
+}
+
+define i64 @v_saddo_i64(i64 %a, i64 %b) {
+; GFX7-LABEL: v_saddo_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v4, vcc, v0, v2
+; GFX7-NEXT:    v_addc_u32_e32 v5, vcc, v1, v3, vcc
+; GFX7-NEXT:    v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX7-NEXT:    v_cmp_gt_i64_e64 s[4:5], 0, v[2:3]
+; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v4, v0
+; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddo_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v0, v2
+; GFX8-NEXT:    v_addc_u32_e32 v5, vcc, v1, v3, vcc
+; GFX8-NEXT:    v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX8-NEXT:    v_cmp_gt_i64_e64 s[4:5], 0, v[2:3]
+; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v4, v0
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddo_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v0, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, v1, v3, vcc
+; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT:    v_cmp_gt_i64_e64 s[4:5], 0, v[2:3]
+; GFX9-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v4, v0
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v5, vcc
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %saddo = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %add = extractvalue {i64, i1} %saddo, 0
+  %of = extractvalue {i64, i1} %saddo, 1
+  %of.zext = zext i1 %of to i64
+  %ret = add i64 %add, %of.zext
+  ret i64 %ret
+}
+
+define <2 x i32> @v_saddo_v2i32(<2 x i32> %a, <2 x i32> %b) {
+; GFX7-LABEL: v_saddo_v2i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v4, vcc, v0, v2
+; GFX7-NEXT:    v_add_i32_e32 v5, vcc, v1, v3
+; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v4, v0
+; GFX7-NEXT:    v_cmp_lt_i32_e64 s[4:5], v5, v1
+; GFX7-NEXT:    v_cmp_gt_i32_e64 s[6:7], 0, v2
+; GFX7-NEXT:    v_cmp_gt_i32_e64 s[8:9], 0, v3
+; GFX7-NEXT:    s_xor_b64 s[6:7], s[6:7], vcc
+; GFX7-NEXT:    s_xor_b64 s[4:5], s[8:9], s[4:5]
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[6:7]
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v4, v0
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v5, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddo_v2i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v0, v2
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v1, v3
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v4, v0
+; GFX8-NEXT:    v_cmp_lt_i32_e64 s[4:5], v5, v1
+; GFX8-NEXT:    v_cmp_gt_i32_e64 s[6:7], 0, v2
+; GFX8-NEXT:    v_cmp_gt_i32_e64 s[8:9], 0, v3
+; GFX8-NEXT:    s_xor_b64 s[6:7], s[6:7], vcc
+; GFX8-NEXT:    s_xor_b64 s[4:5], s[8:9], s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[6:7]
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v4, v0
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v5, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddo_v2i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v4, v0, v2
+; GFX9-NEXT:    v_add_u32_e32 v5, v1, v3
+; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, v4, v0
+; GFX9-NEXT:    v_cmp_lt_i32_e64 s[4:5], v5, v1
+; GFX9-NEXT:    v_cmp_gt_i32_e64 s[6:7], 0, v2
+; GFX9-NEXT:    v_cmp_gt_i32_e64 s[8:9], 0, v3
+; GFX9-NEXT:    s_xor_b64 s[6:7], s[6:7], vcc
+; GFX9-NEXT:    s_xor_b64 s[4:5], s[8:9], s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[6:7]
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX9-NEXT:    v_add_u32_e32 v0, v4, v0
+; GFX9-NEXT:    v_add_u32_e32 v1, v5, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %saddo = call {<2 x i32>, <2 x i1>} @llvm.sadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)
+  %add = extractvalue {<2 x i32>, <2 x i1>} %saddo, 0
+  %of = extractvalue {<2 x i32>, <2 x i1>} %saddo, 1
+  %of.zext = zext <2 x i1> %of to <2 x i32>
+  %ret = add <2 x i32> %add, %of.zext
+  ret <2 x i32> %ret
+}
+
+define i8 @v_saddo_i8(i8 %a, i8 %b) {
+; GFX7-LABEL: v_saddo_i8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v0, v1
+; GFX7-NEXT:    v_bfe_i32 v3, v2, 0, 8
+; GFX7-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX7-NEXT:    v_bfe_i32 v0, v1, 0, 8
+; GFX7-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddo_i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v2, v0, v1
+; GFX8-NEXT:    v_bfe_i32 v3, v2, 0, 8
+; GFX8-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX8-NEXT:    v_bfe_i32 v0, v1, 0, 8
+; GFX8-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8-NEXT:    v_add_u16_e32 v0, v2, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddo_i8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v2, v0, v1
+; GFX9-NEXT:    v_cmp_lt_i32_sdwa s[4:5], sext(v2), sext(v0) src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_cmp_lt_i32_sdwa s[6:7], sext(v1), v0 src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT:    s_xor_b64 s[4:5], s[6:7], s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX9-NEXT:    v_add_u16_e32 v0, v2, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %saddo = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a, i8 %b)
+  %add = extractvalue {i8, i1} %saddo, 0
+  %of = extractvalue {i8, i1} %saddo, 1
+  %of.zext = zext i1 %of to i8
+  %ret = add i8 %add, %of.zext
+  ret i8 %ret
+}
+
+define i7 @v_saddo_i7(i7 %a, i7 %b) {
+; GFX7-LABEL: v_saddo_i7:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v0, v1
+; GFX7-NEXT:    v_bfe_i32 v3, v2, 0, 7
+; GFX7-NEXT:    v_bfe_i32 v0, v0, 0, 7
+; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX7-NEXT:    v_bfe_i32 v0, v1, 0, 7
+; GFX7-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddo_i7:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v2, v0, v1
+; GFX8-NEXT:    v_bfe_i32 v3, v2, 0, 7
+; GFX8-NEXT:    v_bfe_i32 v0, v0, 0, 7
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX8-NEXT:    v_bfe_i32 v0, v1, 0, 7
+; GFX8-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8-NEXT:    v_add_u16_e32 v0, v2, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddo_i7:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v2, v0, v1
+; GFX9-NEXT:    v_bfe_i32 v3, v2, 0, 7
+; GFX9-NEXT:    v_bfe_i32 v0, v0, 0, 7
+; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX9-NEXT:    v_bfe_i32 v0, v1, 0, 7
+; GFX9-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX9-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX9-NEXT:    v_add_u16_e32 v0, v2, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %saddo = call {i7, i1} @llvm.sadd.with.overflow.i7(i7 %a, i7 %b)
+  %add = extractvalue {i7, i1} %saddo, 0
+  %of = extractvalue {i7, i1} %saddo, 1
+  %of.zext = zext i1 %of to i7
+  %ret = add i7 %add, %of.zext
+  ret i7 %ret
+}
+
+define amdgpu_ps i32 @s_uaddo_i32(i32 inreg %a, i32 inreg %b) {
+; GFX7-LABEL: s_uaddo_i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_add_u32 s0, s0, s1
+; GFX7-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX7-NEXT:    s_and_b32 s1, s1, 1
+; GFX7-NEXT:    s_add_i32 s0, s0, s1
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddo_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_add_u32 s0, s0, s1
+; GFX8-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX8-NEXT:    s_and_b32 s1, s1, 1
+; GFX8-NEXT:    s_add_i32 s0, s0, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddo_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_add_u32 s0, s0, s1
+; GFX9-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX9-NEXT:    s_and_b32 s1, s1, 1
+; GFX9-NEXT:    s_add_i32 s0, s0, s1
+; GFX9-NEXT:    ; return to shader part epilog
+  %uaddo = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %add = extractvalue {i32, i1} %uaddo, 0
+  %of = extractvalue {i32, i1} %uaddo, 1
+  %of.zext = zext i1 %of to i32
+  %ret = add i32 %add, %of.zext
+  ret i32 %ret
+}
+
+define amdgpu_ps i64 @s_uaddo_i64(i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_uaddo_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_add_u32 s0, s0, s2
+; GFX7-NEXT:    s_cselect_b32 s4, 1, 0
+; GFX7-NEXT:    s_and_b32 s4, s4, 1
+; GFX7-NEXT:    s_cmp_lg_u32 s4, 0
+; GFX7-NEXT:    v_mov_b32_e32 v0, s2
+; GFX7-NEXT:    s_addc_u32 s1, s1, s3
+; GFX7-NEXT:    v_mov_b32_e32 v1, s3
+; GFX7-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddo_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_add_u32 s0, s0, s2
+; GFX8-NEXT:    s_cselect_b32 s4, 1, 0
+; GFX8-NEXT:    s_and_b32 s4, s4, 1
+; GFX8-NEXT:    s_cmp_lg_u32 s4, 0
+; GFX8-NEXT:    v_mov_b32_e32 v0, s2
+; GFX8-NEXT:    s_addc_u32 s1, s1, s3
+; GFX8-NEXT:    v_mov_b32_e32 v1, s3
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddo_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_add_u32 s0, s0, s2
+; GFX9-NEXT:    s_cselect_b32 s4, 1, 0
+; GFX9-NEXT:    s_and_b32 s4, s4, 1
+; GFX9-NEXT:    s_cmp_lg_u32 s4, 0
+; GFX9-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9-NEXT:    s_addc_u32 s1, s1, s3
+; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX9-NEXT:    ; return to shader part epilog
+  %uaddo = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %add = extractvalue {i64, i1} %uaddo, 0
+  %of = extractvalue {i64, i1} %uaddo, 1
+  %of.zext = zext i1 %of to i64
+  %ret = add i64 %add, %of.zext
+  ret i64 %ret
+}
+
+define amdgpu_ps <2 x i32> @s_uaddo_v2i32(<2 x i32> inreg %a, <2 x i32> inreg %b) {
+; GFX7-LABEL: s_uaddo_v2i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_add_i32 s0, s0, s2
+; GFX7-NEXT:    s_add_i32 s1, s1, s3
+; GFX7-NEXT:    s_cmp_lt_u32 s0, s2
+; GFX7-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX7-NEXT:    s_cmp_lt_u32 s1, s3
+; GFX7-NEXT:    s_cselect_b32 s3, 1, 0
+; GFX7-NEXT:    s_add_i32 s0, s0, s2
+; GFX7-NEXT:    s_add_i32 s1, s1, s3
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddo_v2i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_add_i32 s0, s0, s2
+; GFX8-NEXT:    s_add_i32 s1, s1, s3
+; GFX8-NEXT:    s_cmp_lt_u32 s0, s2
+; GFX8-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX8-NEXT:    s_cmp_lt_u32 s1, s3
+; GFX8-NEXT:    s_cselect_b32 s3, 1, 0
+; GFX8-NEXT:    s_add_i32 s0, s0, s2
+; GFX8-NEXT:    s_add_i32 s1, s1, s3
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddo_v2i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_add_i32 s0, s0, s2
+; GFX9-NEXT:    s_add_i32 s1, s1, s3
+; GFX9-NEXT:    s_cmp_lt_u32 s0, s2
+; GFX9-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX9-NEXT:    s_cmp_lt_u32 s1, s3
+; GFX9-NEXT:    s_cselect_b32 s3, 1, 0
+; GFX9-NEXT:    s_add_i32 s0, s0, s2
+; GFX9-NEXT:    s_add_i32 s1, s1, s3
+; GFX9-NEXT:    ; return to shader part epilog
+  %uaddo = call {<2 x i32>, <2 x i1>} @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)
+  %add = extractvalue {<2 x i32>, <2 x i1>} %uaddo, 0
+  %of = extractvalue {<2 x i32>, <2 x i1>} %uaddo, 1
+  %of.zext = zext <2 x i1> %of to <2 x i32>
+  %ret = add <2 x i32> %add, %of.zext
+  ret <2 x i32> %ret
+}
+
+define i8 @s_uaddo_i8(i8 %a, i8 %b) {
+; GFX7-LABEL: s_uaddo_i8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_movk_i32 s4, 0xff
+; GFX7-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX7-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX7-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_uaddo_i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_movk_i32 s4, 0xff
+; GFX8-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX8-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_uaddo_i8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT:    v_cmp_ne_u32_sdwa s[4:5], v0, v0 src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX9-NEXT:    v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %uaddo = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
+  %add = extractvalue {i8, i1} %uaddo, 0
+  %of = extractvalue {i8, i1} %uaddo, 1
+  %of.zext = zext i1 %of to i8
+  %ret = add i8 %add, %of.zext
+  ret i8 %ret
+}
+
+define i7 @s_uaddo_i7(i7 %a, i7 %b) {
+; GFX7-LABEL: s_uaddo_i7:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_movk_i32 s4, 0x7f
+; GFX7-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX7-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX7-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_uaddo_i7:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_movk_i32 s4, 0x7f
+; GFX8-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX8-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_uaddo_i7:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_movk_i32 s4, 0x7f
+; GFX9-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX9-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX9-NEXT:    v_add_u32_e32 v0, v0, v1
+; GFX9-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT:    v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %uaddo = call {i7, i1} @llvm.uadd.with.overflow.i7(i7 %a, i7 %b)
+  %add = extractvalue {i7, i1} %uaddo, 0
+  %of = extractvalue {i7, i1} %uaddo, 1
+  %of.zext = zext i1 %of to i7
+  %ret = add i7 %add, %of.zext
+  ret i7 %ret
+}
+
+define amdgpu_ps i32 @s_saddo_i32(i32 inreg %a, i32 inreg %b) {
+; GFX7-LABEL: s_saddo_i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_add_i32 s2, s0, s1
+; GFX7-NEXT:    s_cmp_lt_i32 s2, s0
+; GFX7-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX7-NEXT:    s_cmp_lt_i32 s1, 0
+; GFX7-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX7-NEXT:    s_xor_b32 s0, s1, s0
+; GFX7-NEXT:    s_and_b32 s0, s0, 1
+; GFX7-NEXT:    s_add_i32 s0, s2, s0
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddo_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_add_i32 s2, s0, s1
+; GFX8-NEXT:    s_cmp_lt_i32 s2, s0
+; GFX8-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX8-NEXT:    s_cmp_lt_i32 s1, 0
+; GFX8-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX8-NEXT:    s_xor_b32 s0, s1, s0
+; GFX8-NEXT:    s_and_b32 s0, s0, 1
+; GFX8-NEXT:    s_add_i32 s0, s2, s0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddo_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_add_i32 s2, s0, s1
+; GFX9-NEXT:    s_cmp_lt_i32 s2, s0
+; GFX9-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX9-NEXT:    s_cmp_lt_i32 s1, 0
+; GFX9-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX9-NEXT:    s_xor_b32 s0, s1, s0
+; GFX9-NEXT:    s_and_b32 s0, s0, 1
+; GFX9-NEXT:    s_add_i32 s0, s2, s0
+; GFX9-NEXT:    ; return to shader part epilog
+  %saddo = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %add = extractvalue {i32, i1} %saddo, 0
+  %of = extractvalue {i32, i1} %saddo, 1
+  %of.zext = zext i1 %of to i32
+  %ret = add i32 %add, %of.zext
+  ret i32 %ret
+}
+
+define amdgpu_ps i64 @s_saddo_i64(i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_saddo_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_add_u32 s4, s0, s2
+; GFX7-NEXT:    s_cselect_b32 s5, 1, 0
+; GFX7-NEXT:    s_and_b32 s5, s5, 1
+; GFX7-NEXT:    s_cmp_lg_u32 s5, 0
+; GFX7-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-NEXT:    s_addc_u32 s5, s1, s3
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
+; GFX7-NEXT:    v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s5
+; GFX7-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, s4, v0
+; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddo_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_add_u32 s4, s0, s2
+; GFX8-NEXT:    s_cselect_b32 s5, 1, 0
+; GFX8-NEXT:    s_and_b32 s5, s5, 1
+; GFX8-NEXT:    s_cmp_lg_u32 s5, 0
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    s_addc_u32 s5, s1, s3
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
+; GFX8-NEXT:    v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s5
+; GFX8-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s4, v0
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddo_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_add_u32 s4, s0, s2
+; GFX9-NEXT:    s_cselect_b32 s5, 1, 0
+; GFX9-NEXT:    s_and_b32 s5, s5, 1
+; GFX9-NEXT:    s_cmp_lg_u32 s5, 0
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    s_addc_u32 s5, s1, s3
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
+; GFX9-NEXT:    v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s4, v0
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX9-NEXT:    ; return to shader part epilog
+  %saddo = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %add = extractvalue {i64, i1} %saddo, 0
+  %of = extractvalue {i64, i1} %saddo, 1
+  %of.zext = zext i1 %of to i64
+  %ret = add i64 %add, %of.zext
+  ret i64 %ret
+}
+
+define amdgpu_ps <2 x i32> @s_saddo_v2i32(<2 x i32> inreg %a, <2 x i32> inreg %b) {
+; GFX7-LABEL: s_saddo_v2i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_add_i32 s4, s0, s2
+; GFX7-NEXT:    s_add_i32 s5, s1, s3
+; GFX7-NEXT:    s_cmp_lt_i32 s4, s0
+; GFX7-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX7-NEXT:    s_cmp_lt_i32 s5, s1
+; GFX7-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX7-NEXT:    s_cmp_lt_i32 s2, 0
+; GFX7-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX7-NEXT:    s_cmp_lt_i32 s3, 0
+; GFX7-NEXT:    s_cselect_b32 s3, 1, 0
+; GFX7-NEXT:    s_xor_b32 s0, s2, s0
+; GFX7-NEXT:    s_xor_b32 s1, s3, s1
+; GFX7-NEXT:    s_and_b32 s0, s0, 1
+; GFX7-NEXT:    s_and_b32 s1, s1, 1
+; GFX7-NEXT:    s_add_i32 s0, s4, s0
+; GFX7-NEXT:    s_add_i32 s1, s5, s1
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddo_v2i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_add_i32 s4, s0, s2
+; GFX8-NEXT:    s_add_i32 s5, s1, s3
+; GFX8-NEXT:    s_cmp_lt_i32 s4, s0
+; GFX8-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX8-NEXT:    s_cmp_lt_i32 s5, s1
+; GFX8-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX8-NEXT:    s_cmp_lt_i32 s2, 0
+; GFX8-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX8-NEXT:    s_cmp_lt_i32 s3, 0
+; GFX8-NEXT:    s_cselect_b32 s3, 1, 0
+; GFX8-NEXT:    s_xor_b32 s0, s2, s0
+; GFX8-NEXT:    s_xor_b32 s1, s3, s1
+; GFX8-NEXT:    s_and_b32 s0, s0, 1
+; GFX8-NEXT:    s_and_b32 s1, s1, 1
+; GFX8-NEXT:    s_add_i32 s0, s4, s0
+; GFX8-NEXT:    s_add_i32 s1, s5, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddo_v2i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_add_i32 s4, s0, s2
+; GFX9-NEXT:    s_add_i32 s5, s1, s3
+; GFX9-NEXT:    s_cmp_lt_i32 s4, s0
+; GFX9-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX9-NEXT:    s_cmp_lt_i32 s5, s1
+; GFX9-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX9-NEXT:    s_cmp_lt_i32 s2, 0
+; GFX9-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX9-NEXT:    s_cmp_lt_i32 s3, 0
+; GFX9-NEXT:    s_cselect_b32 s3, 1, 0
+; GFX9-NEXT:    s_xor_b32 s0, s2, s0
+; GFX9-NEXT:    s_xor_b32 s1, s3, s1
+; GFX9-NEXT:    s_and_b32 s0, s0, 1
+; GFX9-NEXT:    s_and_b32 s1, s1, 1
+; GFX9-NEXT:    s_add_i32 s0, s4, s0
+; GFX9-NEXT:    s_add_i32 s1, s5, s1
+; GFX9-NEXT:    ; return to shader part epilog
+  %saddo = call {<2 x i32>, <2 x i1>} @llvm.sadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)
+  %add = extractvalue {<2 x i32>, <2 x i1>} %saddo, 0
+  %of = extractvalue {<2 x i32>, <2 x i1>} %saddo, 1
+  %of.zext = zext <2 x i1> %of to <2 x i32>
+  %ret = add <2 x i32> %add, %of.zext
+  ret <2 x i32> %ret
+}
+
+define i8 @s_saddo_i8(i8 %a, i8 %b) {
+; GFX7-LABEL: s_saddo_i8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v0, v1
+; GFX7-NEXT:    v_bfe_i32 v3, v2, 0, 8
+; GFX7-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX7-NEXT:    v_bfe_i32 v0, v1, 0, 8
+; GFX7-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_saddo_i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v2, v0, v1
+; GFX8-NEXT:    v_bfe_i32 v3, v2, 0, 8
+; GFX8-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX8-NEXT:    v_bfe_i32 v0, v1, 0, 8
+; GFX8-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8-NEXT:    v_add_u16_e32 v0, v2, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_saddo_i8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v2, v0, v1
+; GFX9-NEXT:    v_cmp_lt_i32_sdwa s[4:5], sext(v2), sext(v0) src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_cmp_lt_i32_sdwa s[6:7], sext(v1), v0 src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT:    s_xor_b64 s[4:5], s[6:7], s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX9-NEXT:    v_add_u16_e32 v0, v2, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %saddo = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a, i8 %b)
+  %add = extractvalue {i8, i1} %saddo, 0
+  %of = extractvalue {i8, i1} %saddo, 1
+  %of.zext = zext i1 %of to i8
+  %ret = add i8 %add, %of.zext
+  ret i8 %ret
+}
+
+define i7 @s_saddo_i7(i7 %a, i7 %b) {
+; GFX7-LABEL: s_saddo_i7:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v0, v1
+; GFX7-NEXT:    v_bfe_i32 v3, v2, 0, 7
+; GFX7-NEXT:    v_bfe_i32 v0, v0, 0, 7
+; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX7-NEXT:    v_bfe_i32 v0, v1, 0, 7
+; GFX7-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_saddo_i7:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v2, v0, v1
+; GFX8-NEXT:    v_bfe_i32 v3, v2, 0, 7
+; GFX8-NEXT:    v_bfe_i32 v0, v0, 0, 7
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX8-NEXT:    v_bfe_i32 v0, v1, 0, 7
+; GFX8-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8-NEXT:    v_add_u16_e32 v0, v2, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_saddo_i7:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v2, v0, v1
+; GFX9-NEXT:    v_bfe_i32 v3, v2, 0, 7
+; GFX9-NEXT:    v_bfe_i32 v0, v0, 0, 7
+; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX9-NEXT:    v_bfe_i32 v0, v1, 0, 7
+; GFX9-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX9-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX9-NEXT:    v_add_u16_e32 v0, v2, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %saddo = call {i7, i1} @llvm.sadd.with.overflow.i7(i7 %a, i7 %b)
+  %add = extractvalue {i7, i1} %saddo, 0
+  %of = extractvalue {i7, i1} %saddo, 1
+  %of.zext = zext i1 %of to i7
+  %ret = add i7 %add, %of.zext
+  ret i7 %ret
+}
+
+define amdgpu_ps i32 @uaddo_i32_sv(i32 inreg %a, i32 %b) {
+; GFX7-LABEL: uaddo_i32_sv:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: uaddo_i32_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: uaddo_i32_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT:    v_add_u32_e32 v0, v0, v1
+; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-NEXT:    ; return to shader part epilog
+  %uaddo = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %add = extractvalue {i32, i1} %uaddo, 0
+  %of = extractvalue {i32, i1} %uaddo, 1
+  %of.zext = zext i1 %of to i32
+  %ret = add i32 %add, %of.zext
+  ret i32 %ret
+}
+
+define amdgpu_ps i16 @uaddo_i16_sv(i16 inreg %a, i16 %b) {
+; GFX7-LABEL: uaddo_i16_sv:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_mov_b32 s1, 0xffff
+; GFX7-NEXT:    s_and_b32 s0, s0, s1
+; GFX7-NEXT:    v_and_b32_e32 v0, s1, v0
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; GFX7-NEXT:    v_and_b32_e32 v1, s1, v0
+; GFX7-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: uaddo_i16_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_mov_b32 s1, 0xffff
+; GFX8-NEXT:    s_and_b32 s0, s0, s1
+; GFX8-NEXT:    v_and_b32_e32 v0, s1, v0
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; GFX8-NEXT:    v_and_b32_e32 v1, s1, v0
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: uaddo_i16_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_mov_b32 s1, 0xffff
+; GFX9-NEXT:    s_and_b32 s0, s0, s1
+; GFX9-NEXT:    v_add_u32_sdwa v0, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX9-NEXT:    v_cmp_ne_u32_sdwa s[0:1], v0, v0 src0_sel:DWORD src1_sel:WORD_0
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX9-NEXT:    v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-NEXT:    ; return to shader part epilog
+  %uaddo = call {i16, i1} @llvm.uadd.with.overflow.i16(i16 %a, i16 %b)
+  %add = extractvalue {i16, i1} %uaddo, 0
+  %of = extractvalue {i16, i1} %uaddo, 1
+  %of.zext = zext i1 %of to i16
+  %ret = add i16 %add, %of.zext
+  ret i16 %ret
+}
+
+define amdgpu_ps i32 @saddo_i32_sv(i32 inreg %a, i32 %b) {
+; GFX7-LABEL: saddo_i32_sv:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, s0, v0
+; GFX7-NEXT:    v_cmp_gt_i32_e32 vcc, s0, v1
+; GFX7-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v0
+; GFX7-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: saddo_i32_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, s0, v0
+; GFX8-NEXT:    v_cmp_gt_i32_e32 vcc, s0, v1
+; GFX8-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v0
+; GFX8-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v1, v0
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: saddo_i32_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_add_u32_e32 v1, s0, v0
+; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, s0, v1
+; GFX9-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v0
+; GFX9-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX9-NEXT:    v_add_u32_e32 v0, v1, v0
+; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-NEXT:    ; return to shader part epilog
+  %saddo = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %add = extractvalue {i32, i1} %saddo, 0
+  %of = extractvalue {i32, i1} %saddo, 1
+  %of.zext = zext i1 %of to i32
+  %ret = add i32 %add, %of.zext
+  ret i32 %ret
+}
+
+define amdgpu_ps i16 @saddo_i16_sv(i16 inreg %a, i16 %b) {
+; GFX7-LABEL: saddo_i16_sv:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, s0, v0
+; GFX7-NEXT:    v_bfe_i32 v2, v1, 0, 16
+; GFX7-NEXT:    s_sext_i32_i16 s0, s0
+; GFX7-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT:    v_cmp_gt_i32_e32 vcc, s0, v2
+; GFX7-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v0
+; GFX7-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: saddo_i16_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_add_u16_e32 v1, s0, v0
+; GFX8-NEXT:    v_cmp_gt_i16_e32 vcc, s0, v1
+; GFX8-NEXT:    v_cmp_gt_i16_e64 s[0:1], 0, v0
+; GFX8-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX8-NEXT:    v_add_u16_e32 v0, v1, v0
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: saddo_i16_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_add_u16_e32 v1, s0, v0
+; GFX9-NEXT:    v_cmp_gt_i16_e32 vcc, s0, v1
+; GFX9-NEXT:    v_cmp_gt_i16_e64 s[0:1], 0, v0
+; GFX9-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX9-NEXT:    v_add_u16_e32 v0, v1, v0
+; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-NEXT:    ; return to shader part epilog
+  %saddo = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a, i16 %b)
+  %add = extractvalue {i16, i1} %saddo, 0
+  %of = extractvalue {i16, i1} %saddo, 1
+  %of.zext = zext i1 %of to i16
+  %ret = add i16 %add, %of.zext
+  ret i16 %ret
+}
+
+declare {i7, i1} @llvm.uadd.with.overflow.i7(i7 %a, i7 %b)
+declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
+declare {i16, i1} @llvm.uadd.with.overflow.i16(i16 %a, i16 %b)
+declare {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+declare {<2 x i32>, <2 x i1>} @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)
+
+declare {i7, i1} @llvm.sadd.with.overflow.i7(i7 %a, i7 %b)
+declare {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a, i8 %b)
+declare {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a, i16 %b)
+declare {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+declare {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+declare {<2 x i32>, <2 x i1>} @llvm.sadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/subo.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/subo.ll
new file mode 100644
index 0000000000000..653fcf1db34fb
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/subo.ll
@@ -0,0 +1,1141 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+
+define i32 @v_usubo_i32(i32 %a, i32 %b) {
+; GFX7-LABEL: v_usubo_i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubo_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubo_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v0, v1
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %usubo = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %sub = extractvalue {i32, i1} %usubo, 0
+  %of = extractvalue {i32, i1} %usubo, 1
+  %of.zext = zext i1 %of to i32
+  %ret = sub i32 %sub, %of.zext
+  ret i32 %ret
+}
+
+define i64 @v_usubo_i64(i64 %a, i64 %b) {
+; GFX7-LABEL: v_usubo_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_sub_i32_e32 v4, vcc, v0, v2
+; GFX7-NEXT:    v_subb_u32_e32 v5, vcc, v1, v3, vcc
+; GFX7-NEXT:    v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v4, v0
+; GFX7-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v5, vcc
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubo_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_sub_u32_e32 v4, vcc, v0, v2
+; GFX8-NEXT:    v_subb_u32_e32 v5, vcc, v1, v3, vcc
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v4, v0
+; GFX8-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v5, vcc
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubo_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_sub_co_u32_e32 v4, vcc, v0, v2
+; GFX9-NEXT:    v_subb_co_u32_e32 v5, vcc, v1, v3, vcc
+; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v4, v0
+; GFX9-NEXT:    v_subbrev_co_u32_e32 v1, vcc, 0, v5, vcc
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %usubo = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+  %sub = extractvalue {i64, i1} %usubo, 0
+  %of = extractvalue {i64, i1} %usubo, 1
+  %of.zext = zext i1 %of to i64
+  %ret = sub i64 %sub, %of.zext
+  ret i64 %ret
+}
+
+define i8 @v_usubo_i8(i8 %a, i8 %b) {
+; GFX7-LABEL: v_usubo_i8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_movk_i32 s4, 0xff
+; GFX7-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX7-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX7-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubo_i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_movk_i32 s4, 0xff
+; GFX8-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX8-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubo_i8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_sub_u32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT:    v_cmp_ne_u32_sdwa s[4:5], v0, v0 src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX9-NEXT:    v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %usubo = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %a, i8 %b)
+  %sub = extractvalue {i8, i1} %usubo, 0
+  %of = extractvalue {i8, i1} %usubo, 1
+  %of.zext = zext i1 %of to i8
+  %ret = sub i8 %sub, %of.zext
+  ret i8 %ret
+}
+
+define i7 @v_usubo_i7(i7 %a, i7 %b) {
+; GFX7-LABEL: v_usubo_i7:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_movk_i32 s4, 0x7f
+; GFX7-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX7-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX7-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubo_i7:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_movk_i32 s4, 0x7f
+; GFX8-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX8-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubo_i7:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_movk_i32 s4, 0x7f
+; GFX9-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX9-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v1
+; GFX9-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT:    v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %usubo = call {i7, i1} @llvm.usub.with.overflow.i7(i7 %a, i7 %b)
+  %sub = extractvalue {i7, i1} %usubo, 0
+  %of = extractvalue {i7, i1} %usubo, 1
+  %of.zext = zext i1 %of to i7
+  %ret = sub i7 %sub, %of.zext
+  ret i7 %ret
+}
+
+define <2 x i32> @v_usubo_v2i32(<2 x i32> %a, <2 x i32> %b) {
+; GFX7-LABEL: v_usubo_v2i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_sub_i32_e32 v4, vcc, v0, v2
+; GFX7-NEXT:    v_sub_i32_e32 v5, vcc, v1, v3
+; GFX7-NEXT:    v_cmp_lt_u32_e32 vcc, v0, v2
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX7-NEXT:    v_cmp_lt_u32_e32 vcc, v1, v3
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v4, v0
+; GFX7-NEXT:    v_sub_i32_e32 v1, vcc, v5, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubo_v2i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_sub_u32_e32 v4, vcc, v0, v2
+; GFX8-NEXT:    v_sub_u32_e32 v5, vcc, v1, v3
+; GFX8-NEXT:    v_cmp_lt_u32_e32 vcc, v0, v2
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT:    v_cmp_lt_u32_e32 vcc, v1, v3
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v4, v0
+; GFX8-NEXT:    v_sub_u32_e32 v1, vcc, v5, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubo_v2i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_cmp_lt_u32_e32 vcc, v0, v2
+; GFX9-NEXT:    v_sub_u32_e32 v4, v0, v2
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT:    v_cmp_lt_u32_e32 vcc, v1, v3
+; GFX9-NEXT:    v_sub_u32_e32 v5, v1, v3
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT:    v_sub_u32_e32 v0, v4, v0
+; GFX9-NEXT:    v_sub_u32_e32 v1, v5, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %usubo = call {<2 x i32>, <2 x i1>} @llvm.usub.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)
+  %sub = extractvalue {<2 x i32>, <2 x i1>} %usubo, 0
+  %of = extractvalue {<2 x i32>, <2 x i1>} %usubo, 1
+  %of.zext = zext <2 x i1> %of to <2 x i32>
+  %ret = sub <2 x i32> %sub, %of.zext
+  ret <2 x i32> %ret
+}
+
+define i32 @v_ssubo_i32(i32 %a, i32 %b) {
+; GFX7-LABEL: v_ssubo_i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_sub_i32_e32 v2, vcc, v0, v1
+; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v2, v0
+; GFX7-NEXT:    v_cmp_lt_i32_e64 s[4:5], 0, v1
+; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v2, v0
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubo_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_sub_u32_e32 v2, vcc, v0, v1
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v2, v0
+; GFX8-NEXT:    v_cmp_lt_i32_e64 s[4:5], 0, v1
+; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v2, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubo_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_sub_u32_e32 v2, v0, v1
+; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, v2, v0
+; GFX9-NEXT:    v_cmp_lt_i32_e64 s[4:5], 0, v1
+; GFX9-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX9-NEXT:    v_sub_u32_e32 v0, v2, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %ssubo = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %sub = extractvalue {i32, i1} %ssubo, 0
+  %of = extractvalue {i32, i1} %ssubo, 1
+  %of.zext = zext i1 %of to i32
+  %ret = sub i32 %sub, %of.zext
+  ret i32 %ret
+}
+
+define i64 @v_ssubo_i64(i64 %a, i64 %b) {
+; GFX7-LABEL: v_ssubo_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_sub_i32_e32 v4, vcc, v0, v2
+; GFX7-NEXT:    v_subb_u32_e32 v5, vcc, v1, v3, vcc
+; GFX7-NEXT:    v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX7-NEXT:    v_cmp_lt_i64_e64 s[4:5], 0, v[2:3]
+; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v4, v0
+; GFX7-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v5, vcc
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubo_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_sub_u32_e32 v4, vcc, v0, v2
+; GFX8-NEXT:    v_subb_u32_e32 v5, vcc, v1, v3, vcc
+; GFX8-NEXT:    v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX8-NEXT:    v_cmp_lt_i64_e64 s[4:5], 0, v[2:3]
+; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v4, v0
+; GFX8-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v5, vcc
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubo_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_sub_co_u32_e32 v4, vcc, v0, v2
+; GFX9-NEXT:    v_subb_co_u32_e32 v5, vcc, v1, v3, vcc
+; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT:    v_cmp_lt_i64_e64 s[4:5], 0, v[2:3]
+; GFX9-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v4, v0
+; GFX9-NEXT:    v_subbrev_co_u32_e32 v1, vcc, 0, v5, vcc
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %ssubo = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %sub = extractvalue {i64, i1} %ssubo, 0
+  %of = extractvalue {i64, i1} %ssubo, 1
+  %of.zext = zext i1 %of to i64
+  %ret = sub i64 %sub, %of.zext
+  ret i64 %ret
+}
+
+define <2 x i32> @v_ssubo_v2i32(<2 x i32> %a, <2 x i32> %b) {
+; GFX7-LABEL: v_ssubo_v2i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_sub_i32_e32 v4, vcc, v0, v2
+; GFX7-NEXT:    v_sub_i32_e32 v5, vcc, v1, v3
+; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v4, v0
+; GFX7-NEXT:    v_cmp_lt_i32_e64 s[4:5], v5, v1
+; GFX7-NEXT:    v_cmp_lt_i32_e64 s[6:7], 0, v2
+; GFX7-NEXT:    v_cmp_lt_i32_e64 s[8:9], 0, v3
+; GFX7-NEXT:    s_xor_b64 s[6:7], s[6:7], vcc
+; GFX7-NEXT:    s_xor_b64 s[4:5], s[8:9], s[4:5]
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[6:7]
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v4, v0
+; GFX7-NEXT:    v_sub_i32_e32 v1, vcc, v5, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubo_v2i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_sub_u32_e32 v4, vcc, v0, v2
+; GFX8-NEXT:    v_sub_u32_e32 v5, vcc, v1, v3
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v4, v0
+; GFX8-NEXT:    v_cmp_lt_i32_e64 s[4:5], v5, v1
+; GFX8-NEXT:    v_cmp_lt_i32_e64 s[6:7], 0, v2
+; GFX8-NEXT:    v_cmp_lt_i32_e64 s[8:9], 0, v3
+; GFX8-NEXT:    s_xor_b64 s[6:7], s[6:7], vcc
+; GFX8-NEXT:    s_xor_b64 s[4:5], s[8:9], s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[6:7]
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v4, v0
+; GFX8-NEXT:    v_sub_u32_e32 v1, vcc, v5, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubo_v2i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_sub_u32_e32 v4, v0, v2
+; GFX9-NEXT:    v_sub_u32_e32 v5, v1, v3
+; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, v4, v0
+; GFX9-NEXT:    v_cmp_lt_i32_e64 s[4:5], v5, v1
+; GFX9-NEXT:    v_cmp_lt_i32_e64 s[6:7], 0, v2
+; GFX9-NEXT:    v_cmp_lt_i32_e64 s[8:9], 0, v3
+; GFX9-NEXT:    s_xor_b64 s[6:7], s[6:7], vcc
+; GFX9-NEXT:    s_xor_b64 s[4:5], s[8:9], s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[6:7]
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX9-NEXT:    v_sub_u32_e32 v0, v4, v0
+; GFX9-NEXT:    v_sub_u32_e32 v1, v5, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %ssubo = call {<2 x i32>, <2 x i1>} @llvm.ssub.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)
+  %sub = extractvalue {<2 x i32>, <2 x i1>} %ssubo, 0
+  %of = extractvalue {<2 x i32>, <2 x i1>} %ssubo, 1
+  %of.zext = zext <2 x i1> %of to <2 x i32>
+  %ret = sub <2 x i32> %sub, %of.zext
+  ret <2 x i32> %ret
+}
+
+define i8 @v_ssubo_i8(i8 %a, i8 %b) {
+; GFX7-LABEL: v_ssubo_i8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_sub_i32_e32 v2, vcc, v0, v1
+; GFX7-NEXT:    v_bfe_i32 v3, v2, 0, 8
+; GFX7-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX7-NEXT:    v_bfe_i32 v0, v1, 0, 8
+; GFX7-NEXT:    v_cmp_lt_i32_e64 s[4:5], 0, v0
+; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v2, v0
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubo_i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_sub_u16_e32 v2, v0, v1
+; GFX8-NEXT:    v_bfe_i32 v3, v2, 0, 8
+; GFX8-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX8-NEXT:    v_bfe_i32 v0, v1, 0, 8
+; GFX8-NEXT:    v_cmp_lt_i32_e64 s[4:5], 0, v0
+; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8-NEXT:    v_sub_u16_e32 v0, v2, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubo_i8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_sub_u16_e32 v2, v0, v1
+; GFX9-NEXT:    v_cmp_lt_i32_sdwa s[4:5], sext(v2), sext(v0) src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_cmp_gt_i32_sdwa s[6:7], sext(v1), v0 src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT:    s_xor_b64 s[4:5], s[6:7], s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX9-NEXT:    v_sub_u16_e32 v0, v2, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %ssubo = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %a, i8 %b)
+  %sub = extractvalue {i8, i1} %ssubo, 0
+  %of = extractvalue {i8, i1} %ssubo, 1
+  %of.zext = zext i1 %of to i8
+  %ret = sub i8 %sub, %of.zext
+  ret i8 %ret
+}
+
+define i7 @v_ssubo_i7(i7 %a, i7 %b) {
+; GFX7-LABEL: v_ssubo_i7:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_sub_i32_e32 v2, vcc, v0, v1
+; GFX7-NEXT:    v_bfe_i32 v3, v2, 0, 7
+; GFX7-NEXT:    v_bfe_i32 v0, v0, 0, 7
+; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX7-NEXT:    v_bfe_i32 v0, v1, 0, 7
+; GFX7-NEXT:    v_cmp_lt_i32_e64 s[4:5], 0, v0
+; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v2, v0
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubo_i7:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_sub_u16_e32 v2, v0, v1
+; GFX8-NEXT:    v_bfe_i32 v3, v2, 0, 7
+; GFX8-NEXT:    v_bfe_i32 v0, v0, 0, 7
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX8-NEXT:    v_bfe_i32 v0, v1, 0, 7
+; GFX8-NEXT:    v_cmp_lt_i32_e64 s[4:5], 0, v0
+; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8-NEXT:    v_sub_u16_e32 v0, v2, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubo_i7:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_sub_u16_e32 v2, v0, v1
+; GFX9-NEXT:    v_bfe_i32 v3, v2, 0, 7
+; GFX9-NEXT:    v_bfe_i32 v0, v0, 0, 7
+; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX9-NEXT:    v_bfe_i32 v0, v1, 0, 7
+; GFX9-NEXT:    v_cmp_lt_i32_e64 s[4:5], 0, v0
+; GFX9-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX9-NEXT:    v_sub_u16_e32 v0, v2, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %ssubo = call {i7, i1} @llvm.ssub.with.overflow.i7(i7 %a, i7 %b)
+  %sub = extractvalue {i7, i1} %ssubo, 0
+  %of = extractvalue {i7, i1} %ssubo, 1
+  %of.zext = zext i1 %of to i7
+  %ret = sub i7 %sub, %of.zext
+  ret i7 %ret
+}
+
+define amdgpu_ps i32 @s_usubo_i32(i32 inreg %a, i32 inreg %b) {
+; GFX7-LABEL: s_usubo_i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_sub_u32 s0, s0, s1
+; GFX7-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX7-NEXT:    s_and_b32 s1, s1, 1
+; GFX7-NEXT:    s_sub_i32 s0, s0, s1
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubo_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_sub_u32 s0, s0, s1
+; GFX8-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX8-NEXT:    s_and_b32 s1, s1, 1
+; GFX8-NEXT:    s_sub_i32 s0, s0, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubo_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_sub_u32 s0, s0, s1
+; GFX9-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX9-NEXT:    s_and_b32 s1, s1, 1
+; GFX9-NEXT:    s_sub_i32 s0, s0, s1
+; GFX9-NEXT:    ; return to shader part epilog
+  %usubo = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %sub = extractvalue {i32, i1} %usubo, 0
+  %of = extractvalue {i32, i1} %usubo, 1
+  %of.zext = zext i1 %of to i32
+  %ret = sub i32 %sub, %of.zext
+  ret i32 %ret
+}
+
+define amdgpu_ps i64 @s_usubo_i64(i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_usubo_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_sub_u32 s4, s0, s2
+; GFX7-NEXT:    v_mov_b32_e32 v0, s2
+; GFX7-NEXT:    s_cselect_b32 s5, 1, 0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s3
+; GFX7-NEXT:    s_and_b32 s5, s5, 1
+; GFX7-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX7-NEXT:    s_cmp_lg_u32 s5, 0
+; GFX7-NEXT:    s_subb_u32 s5, s1, s3
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX7-NEXT:    v_mov_b32_e32 v1, s5
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GFX7-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubo_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_sub_u32 s4, s0, s2
+; GFX8-NEXT:    v_mov_b32_e32 v0, s2
+; GFX8-NEXT:    s_cselect_b32 s5, 1, 0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s3
+; GFX8-NEXT:    s_and_b32 s5, s5, 1
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT:    s_cmp_lg_u32 s5, 0
+; GFX8-NEXT:    s_subb_u32 s5, s1, s3
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v1, s5
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, s4, v0
+; GFX8-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubo_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_sub_u32 s4, s0, s2
+; GFX9-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9-NEXT:    s_cselect_b32 s5, 1, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:    s_and_b32 s5, s5, 1
+; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT:    s_cmp_lg_u32 s5, 0
+; GFX9-NEXT:    s_subb_u32 s5, s1, s3
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, s4, v0
+; GFX9-NEXT:    v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX9-NEXT:    ; return to shader part epilog
+  %usubo = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+  %sub = extractvalue {i64, i1} %usubo, 0
+  %of = extractvalue {i64, i1} %usubo, 1
+  %of.zext = zext i1 %of to i64
+  %ret = sub i64 %sub, %of.zext
+  ret i64 %ret
+}
+
+define amdgpu_ps <2 x i32> @s_usubo_v2i32(<2 x i32> inreg %a, <2 x i32> inreg %b) {
+; GFX7-LABEL: s_usubo_v2i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_sub_i32 s4, s0, s2
+; GFX7-NEXT:    s_sub_i32 s5, s1, s3
+; GFX7-NEXT:    s_cmp_lt_u32 s0, s2
+; GFX7-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX7-NEXT:    s_cmp_lt_u32 s1, s3
+; GFX7-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX7-NEXT:    s_sub_i32 s0, s4, s0
+; GFX7-NEXT:    s_sub_i32 s1, s5, s1
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubo_v2i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_sub_i32 s4, s0, s2
+; GFX8-NEXT:    s_sub_i32 s5, s1, s3
+; GFX8-NEXT:    s_cmp_lt_u32 s0, s2
+; GFX8-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX8-NEXT:    s_cmp_lt_u32 s1, s3
+; GFX8-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX8-NEXT:    s_sub_i32 s0, s4, s0
+; GFX8-NEXT:    s_sub_i32 s1, s5, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubo_v2i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_sub_i32 s4, s0, s2
+; GFX9-NEXT:    s_sub_i32 s5, s1, s3
+; GFX9-NEXT:    s_cmp_lt_u32 s0, s2
+; GFX9-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX9-NEXT:    s_cmp_lt_u32 s1, s3
+; GFX9-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX9-NEXT:    s_sub_i32 s0, s4, s0
+; GFX9-NEXT:    s_sub_i32 s1, s5, s1
+; GFX9-NEXT:    ; return to shader part epilog
+  %usubo = call {<2 x i32>, <2 x i1>} @llvm.usub.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)
+  %sub = extractvalue {<2 x i32>, <2 x i1>} %usubo, 0
+  %of = extractvalue {<2 x i32>, <2 x i1>} %usubo, 1
+  %of.zext = zext <2 x i1> %of to <2 x i32>
+  %ret = sub <2 x i32> %sub, %of.zext
+  ret <2 x i32> %ret
+}
+
+define i8 @s_usubo_i8(i8 %a, i8 %b) {
+; GFX7-LABEL: s_usubo_i8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_movk_i32 s4, 0xff
+; GFX7-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX7-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX7-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_usubo_i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_movk_i32 s4, 0xff
+; GFX8-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX8-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_usubo_i8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_sub_u32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT:    v_cmp_ne_u32_sdwa s[4:5], v0, v0 src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX9-NEXT:    v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %usubo = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %a, i8 %b)
+  %sub = extractvalue {i8, i1} %usubo, 0
+  %of = extractvalue {i8, i1} %usubo, 1
+  %of.zext = zext i1 %of to i8
+  %ret = sub i8 %sub, %of.zext
+  ret i8 %ret
+}
+
+define i7 @s_usubo_i7(i7 %a, i7 %b) {
+; GFX7-LABEL: s_usubo_i7:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_movk_i32 s4, 0x7f
+; GFX7-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX7-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX7-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_usubo_i7:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_movk_i32 s4, 0x7f
+; GFX8-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX8-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_usubo_i7:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_movk_i32 s4, 0x7f
+; GFX9-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX9-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v1
+; GFX9-NEXT:    v_and_b32_e32 v1, s4, v0
+; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT:    v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %usubo = call {i7, i1} @llvm.usub.with.overflow.i7(i7 %a, i7 %b)
+  %sub = extractvalue {i7, i1} %usubo, 0
+  %of = extractvalue {i7, i1} %usubo, 1
+  %of.zext = zext i1 %of to i7
+  %ret = sub i7 %sub, %of.zext
+  ret i7 %ret
+}
+
+define amdgpu_ps i32 @s_ssubo_i32(i32 inreg %a, i32 inreg %b) {
+; GFX7-LABEL: s_ssubo_i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_sub_i32 s2, s0, s1
+; GFX7-NEXT:    s_cmp_lt_i32 s2, s0
+; GFX7-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX7-NEXT:    s_cmp_gt_i32 s1, 0
+; GFX7-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX7-NEXT:    s_xor_b32 s0, s1, s0
+; GFX7-NEXT:    s_and_b32 s0, s0, 1
+; GFX7-NEXT:    s_sub_i32 s0, s2, s0
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubo_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_sub_i32 s2, s0, s1
+; GFX8-NEXT:    s_cmp_lt_i32 s2, s0
+; GFX8-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX8-NEXT:    s_cmp_gt_i32 s1, 0
+; GFX8-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX8-NEXT:    s_xor_b32 s0, s1, s0
+; GFX8-NEXT:    s_and_b32 s0, s0, 1
+; GFX8-NEXT:    s_sub_i32 s0, s2, s0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubo_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_sub_i32 s2, s0, s1
+; GFX9-NEXT:    s_cmp_lt_i32 s2, s0
+; GFX9-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX9-NEXT:    s_cmp_gt_i32 s1, 0
+; GFX9-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX9-NEXT:    s_xor_b32 s0, s1, s0
+; GFX9-NEXT:    s_and_b32 s0, s0, 1
+; GFX9-NEXT:    s_sub_i32 s0, s2, s0
+; GFX9-NEXT:    ; return to shader part epilog
+  %ssubo = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %sub = extractvalue {i32, i1} %ssubo, 0
+  %of = extractvalue {i32, i1} %ssubo, 1
+  %of.zext = zext i1 %of to i32
+  %ret = sub i32 %sub, %of.zext
+  ret i32 %ret
+}
+
+define amdgpu_ps i64 @s_ssubo_i64(i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_ssubo_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_sub_u32 s4, s0, s2
+; GFX7-NEXT:    s_cselect_b32 s5, 1, 0
+; GFX7-NEXT:    s_and_b32 s5, s5, 1
+; GFX7-NEXT:    s_cmp_lg_u32 s5, 0
+; GFX7-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-NEXT:    s_subb_u32 s5, s1, s3
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
+; GFX7-NEXT:    v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s5
+; GFX7-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GFX7-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubo_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_sub_u32 s4, s0, s2
+; GFX8-NEXT:    s_cselect_b32 s5, 1, 0
+; GFX8-NEXT:    s_and_b32 s5, s5, 1
+; GFX8-NEXT:    s_cmp_lg_u32 s5, 0
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    s_subb_u32 s5, s1, s3
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
+; GFX8-NEXT:    v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s5
+; GFX8-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, s4, v0
+; GFX8-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubo_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_sub_u32 s4, s0, s2
+; GFX9-NEXT:    s_cselect_b32 s5, 1, 0
+; GFX9-NEXT:    s_and_b32 s5, s5, 1
+; GFX9-NEXT:    s_cmp_lg_u32 s5, 0
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    s_subb_u32 s5, s1, s3
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
+; GFX9-NEXT:    v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, s4, v0
+; GFX9-NEXT:    v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX9-NEXT:    ; return to shader part epilog
+  %ssubo = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %sub = extractvalue {i64, i1} %ssubo, 0
+  %of = extractvalue {i64, i1} %ssubo, 1
+  %of.zext = zext i1 %of to i64
+  %ret = sub i64 %sub, %of.zext
+  ret i64 %ret
+}
+
+define amdgpu_ps <2 x i32> @s_ssubo_v2i32(<2 x i32> inreg %a, <2 x i32> inreg %b) {
+; GFX7-LABEL: s_ssubo_v2i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_sub_i32 s4, s0, s2
+; GFX7-NEXT:    s_sub_i32 s5, s1, s3
+; GFX7-NEXT:    s_cmp_lt_i32 s4, s0
+; GFX7-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX7-NEXT:    s_cmp_lt_i32 s5, s1
+; GFX7-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX7-NEXT:    s_cmp_gt_i32 s2, 0
+; GFX7-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX7-NEXT:    s_cmp_gt_i32 s3, 0
+; GFX7-NEXT:    s_cselect_b32 s3, 1, 0
+; GFX7-NEXT:    s_xor_b32 s0, s2, s0
+; GFX7-NEXT:    s_xor_b32 s1, s3, s1
+; GFX7-NEXT:    s_and_b32 s0, s0, 1
+; GFX7-NEXT:    s_and_b32 s1, s1, 1
+; GFX7-NEXT:    s_sub_i32 s0, s4, s0
+; GFX7-NEXT:    s_sub_i32 s1, s5, s1
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubo_v2i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_sub_i32 s4, s0, s2
+; GFX8-NEXT:    s_sub_i32 s5, s1, s3
+; GFX8-NEXT:    s_cmp_lt_i32 s4, s0
+; GFX8-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX8-NEXT:    s_cmp_lt_i32 s5, s1
+; GFX8-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX8-NEXT:    s_cmp_gt_i32 s2, 0
+; GFX8-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX8-NEXT:    s_cmp_gt_i32 s3, 0
+; GFX8-NEXT:    s_cselect_b32 s3, 1, 0
+; GFX8-NEXT:    s_xor_b32 s0, s2, s0
+; GFX8-NEXT:    s_xor_b32 s1, s3, s1
+; GFX8-NEXT:    s_and_b32 s0, s0, 1
+; GFX8-NEXT:    s_and_b32 s1, s1, 1
+; GFX8-NEXT:    s_sub_i32 s0, s4, s0
+; GFX8-NEXT:    s_sub_i32 s1, s5, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubo_v2i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_sub_i32 s4, s0, s2
+; GFX9-NEXT:    s_sub_i32 s5, s1, s3
+; GFX9-NEXT:    s_cmp_lt_i32 s4, s0
+; GFX9-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX9-NEXT:    s_cmp_lt_i32 s5, s1
+; GFX9-NEXT:    s_cselect_b32 s1, 1, 0
+; GFX9-NEXT:    s_cmp_gt_i32 s2, 0
+; GFX9-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX9-NEXT:    s_cmp_gt_i32 s3, 0
+; GFX9-NEXT:    s_cselect_b32 s3, 1, 0
+; GFX9-NEXT:    s_xor_b32 s0, s2, s0
+; GFX9-NEXT:    s_xor_b32 s1, s3, s1
+; GFX9-NEXT:    s_and_b32 s0, s0, 1
+; GFX9-NEXT:    s_and_b32 s1, s1, 1
+; GFX9-NEXT:    s_sub_i32 s0, s4, s0
+; GFX9-NEXT:    s_sub_i32 s1, s5, s1
+; GFX9-NEXT:    ; return to shader part epilog
+  %ssubo = call {<2 x i32>, <2 x i1>} @llvm.ssub.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)
+  %sub = extractvalue {<2 x i32>, <2 x i1>} %ssubo, 0
+  %of = extractvalue {<2 x i32>, <2 x i1>} %ssubo, 1
+  %of.zext = zext <2 x i1> %of to <2 x i32>
+  %ret = sub <2 x i32> %sub, %of.zext
+  ret <2 x i32> %ret
+}
+
+define i8 @s_ssubo_i8(i8 %a, i8 %b) {
+; GFX7-LABEL: s_ssubo_i8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_sub_i32_e32 v2, vcc, v0, v1
+; GFX7-NEXT:    v_bfe_i32 v3, v2, 0, 8
+; GFX7-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX7-NEXT:    v_bfe_i32 v0, v1, 0, 8
+; GFX7-NEXT:    v_cmp_lt_i32_e64 s[4:5], 0, v0
+; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v2, v0
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_ssubo_i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_sub_u16_e32 v2, v0, v1
+; GFX8-NEXT:    v_bfe_i32 v3, v2, 0, 8
+; GFX8-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX8-NEXT:    v_bfe_i32 v0, v1, 0, 8
+; GFX8-NEXT:    v_cmp_lt_i32_e64 s[4:5], 0, v0
+; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8-NEXT:    v_sub_u16_e32 v0, v2, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_ssubo_i8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_sub_u16_e32 v2, v0, v1
+; GFX9-NEXT:    v_cmp_lt_i32_sdwa s[4:5], sext(v2), sext(v0) src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_cmp_gt_i32_sdwa s[6:7], sext(v1), v0 src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT:    s_xor_b64 s[4:5], s[6:7], s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX9-NEXT:    v_sub_u16_e32 v0, v2, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %ssubo = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %a, i8 %b)
+  %sub = extractvalue {i8, i1} %ssubo, 0
+  %of = extractvalue {i8, i1} %ssubo, 1
+  %of.zext = zext i1 %of to i8
+  %ret = sub i8 %sub, %of.zext
+  ret i8 %ret
+}
+
+define i7 @s_ssubo_i7(i7 %a, i7 %b) {
+; GFX7-LABEL: s_ssubo_i7:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_sub_i32_e32 v2, vcc, v0, v1
+; GFX7-NEXT:    v_bfe_i32 v3, v2, 0, 7
+; GFX7-NEXT:    v_bfe_i32 v0, v0, 0, 7
+; GFX7-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX7-NEXT:    v_bfe_i32 v0, v1, 0, 7
+; GFX7-NEXT:    v_cmp_lt_i32_e64 s[4:5], 0, v0
+; GFX7-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v2, v0
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_ssubo_i7:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_sub_u16_e32 v2, v0, v1
+; GFX8-NEXT:    v_bfe_i32 v3, v2, 0, 7
+; GFX8-NEXT:    v_bfe_i32 v0, v0, 0, 7
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX8-NEXT:    v_bfe_i32 v0, v1, 0, 7
+; GFX8-NEXT:    v_cmp_lt_i32_e64 s[4:5], 0, v0
+; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8-NEXT:    v_sub_u16_e32 v0, v2, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_ssubo_i7:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_sub_u16_e32 v2, v0, v1
+; GFX9-NEXT:    v_bfe_i32 v3, v2, 0, 7
+; GFX9-NEXT:    v_bfe_i32 v0, v0, 0, 7
+; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, v3, v0
+; GFX9-NEXT:    v_bfe_i32 v0, v1, 0, 7
+; GFX9-NEXT:    v_cmp_lt_i32_e64 s[4:5], 0, v0
+; GFX9-NEXT:    s_xor_b64 s[4:5], s[4:5], vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX9-NEXT:    v_sub_u16_e32 v0, v2, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %ssubo = call {i7, i1} @llvm.ssub.with.overflow.i7(i7 %a, i7 %b)
+  %sub = extractvalue {i7, i1} %ssubo, 0
+  %of = extractvalue {i7, i1} %ssubo, 1
+  %of.zext = zext i1 %of to i7
+  %ret = sub i7 %sub, %of.zext
+  ret i7 %ret
+}
+
+define amdgpu_ps i32 @usubo_i32_sv(i32 inreg %a, i32 %b) {
+; GFX7-LABEL: usubo_i32_sv:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, s0, v0
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: usubo_i32_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, s0, v0
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: usubo_i32_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, s0, v0
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v1
+; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-NEXT:    ; return to shader part epilog
+  %usubo = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %sub = extractvalue {i32, i1} %usubo, 0
+  %of = extractvalue {i32, i1} %usubo, 1
+  %of.zext = zext i1 %of to i32
+  %ret = sub i32 %sub, %of.zext
+  ret i32 %ret
+}
+
+define amdgpu_ps i16 @usubo_i16_sv(i16 inreg %a, i16 %b) {
+; GFX7-LABEL: usubo_i16_sv:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_mov_b32 s1, 0xffff
+; GFX7-NEXT:    s_and_b32 s0, s0, s1
+; GFX7-NEXT:    v_and_b32_e32 v0, s1, v0
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, s0, v0
+; GFX7-NEXT:    v_and_b32_e32 v1, s1, v0
+; GFX7-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX7-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: usubo_i16_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_mov_b32 s1, 0xffff
+; GFX8-NEXT:    s_and_b32 s0, s0, s1
+; GFX8-NEXT:    v_and_b32_e32 v0, s1, v0
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, s0, v0
+; GFX8-NEXT:    v_and_b32_e32 v1, s1, v0
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, v0, v1
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: usubo_i16_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_mov_b32 s1, 0xffff
+; GFX9-NEXT:    s_and_b32 s0, s0, s1
+; GFX9-NEXT:    v_sub_u32_sdwa v0, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX9-NEXT:    v_cmp_ne_u32_sdwa s[0:1], v0, v0 src0_sel:DWORD src1_sel:WORD_0
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX9-NEXT:    v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-NEXT:    ; return to shader part epilog
+  %usubo = call {i16, i1} @llvm.usub.with.overflow.i16(i16 %a, i16 %b)
+  %sub = extractvalue {i16, i1} %usubo, 0
+  %of = extractvalue {i16, i1} %usubo, 1
+  %of.zext = zext i1 %of to i16
+  %ret = sub i16 %sub, %of.zext
+  ret i16 %ret
+}
+
+define amdgpu_ps i32 @ssubo_i32_sv(i32 inreg %a, i32 %b) {
+; GFX7-LABEL: ssubo_i32_sv:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    v_sub_i32_e32 v1, vcc, s0, v0
+; GFX7-NEXT:    v_cmp_gt_i32_e32 vcc, s0, v1
+; GFX7-NEXT:    v_cmp_lt_i32_e64 s[0:1], 0, v0
+; GFX7-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v1, v0
+; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: ssubo_i32_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_sub_u32_e32 v1, vcc, s0, v0
+; GFX8-NEXT:    v_cmp_gt_i32_e32 vcc, s0, v1
+; GFX8-NEXT:    v_cmp_lt_i32_e64 s[0:1], 0, v0
+; GFX8-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v1, v0
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: ssubo_i32_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_sub_u32_e32 v1, s0, v0
+; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, s0, v1
+; GFX9-NEXT:    v_cmp_lt_i32_e64 s[0:1], 0, v0
+; GFX9-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX9-NEXT:    v_sub_u32_e32 v0, v1, v0
+; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-NEXT:    ; return to shader part epilog
+  %ssubo = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %sub = extractvalue {i32, i1} %ssubo, 0
+  %of = extractvalue {i32, i1} %ssubo, 1
+  %of.zext = zext i1 %of to i32
+  %ret = sub i32 %sub, %of.zext
+  ret i32 %ret
+}
+
+define amdgpu_ps i16 @ssubo_i16_sv(i16 inreg %a, i16 %b) {
+; GFX7-LABEL: ssubo_i16_sv:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    v_sub_i32_e32 v1, vcc, s0, v0
+; GFX7-NEXT:    v_bfe_i32 v2, v1, 0, 16
+; GFX7-NEXT:    s_sext_i32_i16 s0, s0
+; GFX7-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT:    v_cmp_gt_i32_e32 vcc, s0, v2
+; GFX7-NEXT:    v_cmp_lt_i32_e64 s[0:1], 0, v0
+; GFX7-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v1, v0
+; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: ssubo_i16_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_sub_u16_e32 v1, s0, v0
+; GFX8-NEXT:    v_cmp_gt_i16_e32 vcc, s0, v1
+; GFX8-NEXT:    v_cmp_lt_i16_e64 s[0:1], 0, v0
+; GFX8-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX8-NEXT:    v_sub_u16_e32 v0, v1, v0
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: ssubo_i16_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_sub_u16_e32 v1, s0, v0
+; GFX9-NEXT:    v_cmp_gt_i16_e32 vcc, s0, v1
+; GFX9-NEXT:    v_cmp_lt_i16_e64 s[0:1], 0, v0
+; GFX9-NEXT:    s_xor_b64 s[0:1], s[0:1], vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX9-NEXT:    v_sub_u16_e32 v0, v1, v0
+; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-NEXT:    ; return to shader part epilog
+  %ssubo = call {i16, i1} @llvm.ssub.with.overflow.i16(i16 %a, i16 %b)
+  %sub = extractvalue {i16, i1} %ssubo, 0
+  %of = extractvalue {i16, i1} %ssubo, 1
+  %of.zext = zext i1 %of to i16
+  %ret = sub i16 %sub, %of.zext
+  ret i16 %ret
+}
+
+declare {i7, i1} @llvm.usub.with.overflow.i7(i7 %a, i7 %b)
+declare {i8, i1} @llvm.usub.with.overflow.i8(i8 %a, i8 %b)
+declare {i16, i1} @llvm.usub.with.overflow.i16(i16 %a, i16 %b)
+declare {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+declare {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+declare {<2 x i32>, <2 x i1>} @llvm.usub.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)
+
+declare {i7, i1} @llvm.ssub.with.overflow.i7(i7 %a, i7 %b)
+declare {i8, i1} @llvm.ssub.with.overflow.i8(i8 %a, i8 %b)
+declare {i16, i1} @llvm.ssub.with.overflow.i16(i16 %a, i16 %b)
+declare {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+declare {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+declare {<2 x i32>, <2 x i1>} @llvm.ssub.with.overflow.v2i32(<2 x i32> %a, <2 x i32> %b)