[llvm] 30bd96f - AMDGPU: Add baseline test for undoing mul add 1 reassociation

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 5 15:44:24 PDT 2023


Author: Matt Arsenault
Date: 2023-06-05T18:44:17-04:00
New Revision: 30bd96fa17f961e62f19554e6f9452ec3d6d5989

URL: https://github.com/llvm/llvm-project/commit/30bd96fa17f961e62f19554e6f9452ec3d6d5989
DIFF: https://github.com/llvm/llvm-project/commit/30bd96fa17f961e62f19554e6f9452ec3d6d5989.diff

LOG: AMDGPU: Add baseline test for undoing mul add 1 reassociation

Add baseline tests for combines that would undo the regressions caused by
0cfc6510323fbb5a56a5de23cbc65f7cc30fd34c.
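
As an illustrative sketch only (not part of the committed test), the IR shape
the backend wants to rewrite looks like:

  %add = add i32 %y, 1
  %mul = mul i32 %x, %add    ; prefer selecting this as (x * y) + x, i.e. a mad

Undoing the reassociation would let instruction selection form v_mad_u64_u32 /
v_mad_u32_u24 instead of a separate add and mul, as the baseline checks below
illustrate.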

Added: 
    llvm/test/CodeGen/AMDGPU/reassoc-mul-add-1-to-mad.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AMDGPU/reassoc-mul-add-1-to-mad.ll b/llvm/test/CodeGen/AMDGPU/reassoc-mul-add-1-to-mad.ll
new file mode 100644
index 0000000000000..877f21eb23a8e
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/reassoc-mul-add-1-to-mad.ll
@@ -0,0 +1,3860 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx600 < %s | FileCheck -check-prefixes=GFX67,GFX6 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx700 < %s | FileCheck -check-prefixes=GFX67,GFX7 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx803 < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9,GFX900 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefixes=GFX9,GFX90A %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 < %s | FileCheck -check-prefixes=GFX10 %s
+
+; We want to undo these canonicalizations to enable mad matching:
+; (x * y) + x --> x * (y + 1)
+; (x * y) - x --> x * (y - 1)
+
+define i32 @v_mul_add_1_i32(i32 %x, i32 %y) {
+; GFX67-LABEL: v_mul_add_1_i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 1, v1
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, 1, v1
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, 1, v1
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i32 %y, 1
+  %mul = mul i32 %x, %add
+  ret i32 %mul
+}
+
+define i32 @v_mul_add_1_i32_commute(i32 %x, i32 %y) {
+; GFX67-LABEL: v_mul_add_1_i32_commute:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_mul_lo_u32 v0, v1, v0
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i32_commute:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 1, v1
+; GFX8-NEXT:    v_mul_lo_u32 v0, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i32_commute:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, 1, v1
+; GFX9-NEXT:    v_mul_lo_u32 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i32_commute:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, 1, v1
+; GFX10-NEXT:    v_mul_lo_u32 v0, v1, v0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i32 %y, 1
+  %mul = mul i32 %add, %x
+  ret i32 %mul
+}
+
+define i32 @v_mul_add_x_i32(i32 %x, i32 %y) {
+; GFX67-LABEL: v_mul_add_x_i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_mul_lo_u32 v1, v0, v1
+; GFX67-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_x_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mul_lo_u32 v1, v0, v1
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_x_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v1, v[0:1]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_x_i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], null, v0, v1, v[0:1]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i32 %x, %y
+  %add = add i32 %x, %mul
+  ret i32 %add
+}
+
+define i32 @v_mul_sub_1_i32(i32 %x, i32 %y) {
+; GFX67-LABEL: v_mul_sub_1_i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, -1, v1
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_1_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, -1, v1
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_1_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, -1, v1
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_1_i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, -1, v1
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub i32 %y, 1
+  %mul = mul i32 %x, %sub
+  ret i32 %mul
+}
+
+define i32 @v_mul_sub_1_i32_commute(i32 %x, i32 %y) {
+; GFX67-LABEL: v_mul_sub_1_i32_commute:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, -1, v1
+; GFX67-NEXT:    v_mul_lo_u32 v0, v1, v0
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_1_i32_commute:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, -1, v1
+; GFX8-NEXT:    v_mul_lo_u32 v0, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_1_i32_commute:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, -1, v1
+; GFX9-NEXT:    v_mul_lo_u32 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_1_i32_commute:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, -1, v1
+; GFX10-NEXT:    v_mul_lo_u32 v0, v1, v0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub i32 %y, 1
+  %mul = mul i32 %sub, %x
+  ret i32 %mul
+}
+
+define i32 @v_mul_sub_x_i32(i32 %x, i32 %y) {
+; GFX67-LABEL: v_mul_sub_x_i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_mul_lo_u32 v1, v0, v1
+; GFX67-NEXT:    v_sub_i32_e32 v0, vcc, v1, v0
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_x_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mul_lo_u32 v1, v0, v1
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_x_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mul_lo_u32 v1, v0, v1
+; GFX9-NEXT:    v_sub_u32_e32 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_x_i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mul_lo_u32 v1, v0, v1
+; GFX10-NEXT:    v_sub_nc_u32_e32 v0, v1, v0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i32 %x, %y
+  %sub = sub i32 %mul, %x
+  ret i32 %sub
+}
+
+define i32 @v_mul_add_2_i32(i32 %x, i32 %y) {
+; GFX67-LABEL: v_mul_add_2_i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 2, v1
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_2_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 2, v1
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_2_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, 2, v1
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_2_i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, 2, v1
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i32 %y, 2
+  %mul = mul i32 %x, %add
+  ret i32 %mul
+}
+
+define i32 @v_mul_sub_2_i32(i32 %x, i32 %y) {
+; GFX67-LABEL: v_mul_sub_2_i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, -2, v1
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_2_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, -2, v1
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_2_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, -2, v1
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_2_i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, -2, v1
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub i32 %y, 2
+  %mul = mul i32 %x, %sub
+  ret i32 %mul
+}
+
+define i32 @v_mul_add_65_i32(i32 %x, i32 %y) {
+; GFX67-LABEL: v_mul_add_65_i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 0x41, v1
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_65_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x41, v1
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_65_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, 0x41, v1
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_65_i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, 0x41, v1
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i32 %y, 65
+  %mul = mul i32 %x, %add
+  ret i32 %mul
+}
+
+define i32 @v_mul_sub_65_i32(i32 %x, i32 %y) {
+; GFX67-LABEL: v_mul_sub_65_i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 0xffffffbf, v1
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_65_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0xffffffbf, v1
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_65_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, 0xffffffbf, v1
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_65_i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, 0xffffffbf, v1
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub i32 %y, 65
+  %mul = mul i32 %x, %sub
+  ret i32 %mul
+}
+
+define i24 @v_mul_add_1_i24_zext(i24 zeroext %x, i24 zeroext %y) {
+; GFX67-LABEL: v_mul_add_1_i24_zext:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i24_zext:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 1, v1
+; GFX8-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i24_zext:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, 1, v1
+; GFX9-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i24_zext:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, 1, v1
+; GFX10-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i24 %y, 1
+  %mul = mul i24 %x, %add
+  ret i24 %mul
+}
+
+define i24 @v_mul_sub_1_i24_zext(i24 zeroext %x, i24 zeroext %y) {
+; GFX67-LABEL: v_mul_sub_1_i24_zext:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, -1, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_1_i24_zext:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, -1, v1
+; GFX8-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_1_i24_zext:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, -1, v1
+; GFX9-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_1_i24_zext:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, -1, v1
+; GFX10-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub i24 %y, 1
+  %mul = mul i24 %x, %sub
+  ret i24 %mul
+}
+
+define i24 @v_add_mul_i24_zext_1(i24 zeroext %x, i24 zeroext %y) {
+; GFX67-LABEL: v_add_mul_i24_zext_1:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_mad_u32_u24 v0, v0, v1, v0
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_add_mul_i24_zext_1:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mad_u32_u24 v0, v0, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_add_mul_i24_zext_1:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mad_u32_u24 v0, v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_add_mul_i24_zext_1:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mad_u32_u24 v0, v0, v1, v0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i24 %x, %y
+  %add = add i24 %mul, %x
+  ret i24 %add
+}
+
+define i24 @v_mul_add_1_i24_sext(i24 signext %x, i24 signext %y) {
+; GFX67-LABEL: v_mul_add_1_i24_sext:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i24_sext:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 1, v1
+; GFX8-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i24_sext:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, 1, v1
+; GFX9-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i24_sext:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, 1, v1
+; GFX10-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i24 %y, 1
+  %mul = mul i24 %x, %add
+  ret i24 %mul
+}
+
+define i24 @v_add_mul_i24_sext_1(i24 signext %x, i24 signext %y) {
+; GFX67-LABEL: v_add_mul_i24_sext_1:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_mad_u32_u24 v0, v0, v1, v0
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_add_mul_i24_sext_1:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mad_u32_u24 v0, v0, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_add_mul_i24_sext_1:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mad_u32_u24 v0, v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_add_mul_i24_sext_1:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mad_u32_u24 v0, v0, v1, v0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i24 %x, %y
+  %add = add i24 %mul, %x
+  ret i24 %add
+}
+
+define i24 @v_mul_sub_1_i24_sext(i24 signext %x, i24 signext %y) {
+; GFX67-LABEL: v_mul_sub_1_i24_sext:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, -1, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_1_i24_sext:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, -1, v1
+; GFX8-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_1_i24_sext:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, -1, v1
+; GFX9-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_1_i24_sext:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, -1, v1
+; GFX10-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub i24 %y, 1
+  %mul = mul i24 %x, %sub
+  ret i24 %mul
+}
+
+define i25 @v_mul_add_1_i25_zext(i25 zeroext %x, i25 zeroext %y) {
+; GFX67-LABEL: v_mul_add_1_i25_zext:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i25_zext:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 1, v1
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i25_zext:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, 1, v1
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i25_zext:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, 1, v1
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i25 %y, 1
+  %mul = mul i25 %x, %add
+  ret i25 %mul
+}
+
+define i25 @v_mul_sub_1_i25_zext(i25 zeroext %x, i25 zeroext %y) {
+; GFX67-LABEL: v_mul_sub_1_i25_zext:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 0x1ffffff, v1
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_1_i25_zext:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x1ffffff, v1
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_1_i25_zext:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, 0x1ffffff, v1
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_1_i25_zext:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, 0x1ffffff, v1
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub i25 %y, 1
+  %mul = mul i25 %x, %sub
+  ret i25 %mul
+}
+
+define i25 @v_mul_add_1_i25_sext(i25 signext %x, i25 signext %y) {
+; GFX67-LABEL: v_mul_add_1_i25_sext:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i25_sext:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 1, v1
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i25_sext:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, 1, v1
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i25_sext:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, 1, v1
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i25 %y, 1
+  %mul = mul i25 %x, %add
+  ret i25 %mul
+}
+
+define i25 @v_mul_sub_1_i25_sext(i25 signext %x, i25 signext %y) {
+; GFX67-LABEL: v_mul_sub_1_i25_sext:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 0x1ffffff, v1
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_1_i25_sext:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x1ffffff, v1
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_1_i25_sext:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, 0x1ffffff, v1
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_1_i25_sext:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, 0x1ffffff, v1
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub i25 %y, 1
+  %mul = mul i25 %x, %sub
+  ret i25 %mul
+}
+
+define i16 @v_mul_add_1_i16(i16 %x, i16 %y) {
+; GFX67-LABEL: v_mul_add_1_i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v1, 1, v1
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v1, 1, v1
+; GFX9-NEXT:    v_mul_lo_u16_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u16 v1, v1, 1
+; GFX10-NEXT:    v_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i16 %y, 1
+  %mul = mul i16 %x, %add
+  ret i16 %mul
+}
+
+define i32 @v_mul_add_1_i16_zext_result(i16 %x, i16 %y) {
+; GFX67-LABEL: v_mul_add_1_i16_zext_result:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i16_zext_result:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v1, 1, v1
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i16_zext_result:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v1, 1, v1
+; GFX9-NEXT:    v_mul_lo_u16_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i16_zext_result:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u16 v1, v1, 1
+; GFX10-NEXT:    v_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i16 %y, 1
+  %mul = mul i16 %x, %add
+  %zext = zext i16 %mul to i32
+  ret i32 %zext
+}
+
+define i16 @v_mul_add_1_i16_commute(i16 %x, i16 %y) {
+; GFX67-LABEL: v_mul_add_1_i16_commute:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v1, v0
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i16_commute:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v1, 1, v1
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i16_commute:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v1, 1, v1
+; GFX9-NEXT:    v_mul_lo_u16_e32 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i16_commute:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u16 v1, v1, 1
+; GFX10-NEXT:    v_mul_lo_u16 v0, v1, v0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i16 %y, 1
+  %mul = mul i16 %add, %x
+  ret i16 %mul
+}
+
+define i16 @v_mul_add_x_i16(i16 %x, i16 %y) {
+; GFX67-LABEL: v_mul_add_x_i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_and_b32_e32 v2, 0xffff, v0
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_mad_u32_u24 v0, v2, v1, v0
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_x_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mad_u16 v0, v0, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_x_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mad_legacy_u16 v0, v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_x_i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mad_u16 v0, v0, v1, v0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i16 %x, %y
+  %add = add i16 %x, %mul
+  ret i16 %add
+}
+
+define i16 @v_mul_sub_1_i16(i16 %x, i16 %y) {
+; GFX67-LABEL: v_mul_sub_1_i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, -1, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_1_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v1, -1, v1
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_1_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v1, -1, v1
+; GFX9-NEXT:    v_mul_lo_u16_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_1_i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u16 v1, v1, -1
+; GFX10-NEXT:    v_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub i16 %y, 1
+  %mul = mul i16 %x, %sub
+  ret i16 %mul
+}
+
+define i16 @v_mul_sub_1_i16_commute(i16 %x, i16 %y) {
+; GFX67-LABEL: v_mul_sub_1_i16_commute:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, -1, v1
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v1, v0
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_1_i16_commute:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v1, -1, v1
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_1_i16_commute:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v1, -1, v1
+; GFX9-NEXT:    v_mul_lo_u16_e32 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_1_i16_commute:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u16 v1, v1, -1
+; GFX10-NEXT:    v_mul_lo_u16 v0, v1, v0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub i16 %y, 1
+  %mul = mul i16 %sub, %x
+  ret i16 %mul
+}
+
+define i16 @v_mul_sub_x_i16(i16 %x, i16 %y) {
+; GFX67-LABEL: v_mul_sub_x_i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_and_b32_e32 v2, 0xffff, v0
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, v2, v1
+; GFX67-NEXT:    v_sub_i32_e32 v0, vcc, v1, v0
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_x_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mul_lo_u16_e32 v1, v0, v1
+; GFX8-NEXT:    v_sub_u16_e32 v0, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_x_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mul_lo_u16_e32 v1, v0, v1
+; GFX9-NEXT:    v_sub_u16_e32 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_x_i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mul_lo_u16 v1, v0, v1
+; GFX10-NEXT:    v_sub_nc_u16 v0, v1, v0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i16 %x, %y
+  %sub = sub i16 %mul, %x
+  ret i16 %sub
+}
+
+define i16 @v_mul_add_2_i16(i16 %x, i16 %y) {
+; GFX67-LABEL: v_mul_add_2_i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 2, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_2_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v1, 2, v1
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_2_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v1, 2, v1
+; GFX9-NEXT:    v_mul_lo_u16_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_2_i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u16 v1, v1, 2
+; GFX10-NEXT:    v_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i16 %y, 2
+  %mul = mul i16 %x, %add
+  ret i16 %mul
+}
+
+define i16 @v_mul_sub_2_i16(i16 %x, i16 %y) {
+; GFX67-LABEL: v_mul_sub_2_i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, -2, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_2_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v1, -2, v1
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_2_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v1, -2, v1
+; GFX9-NEXT:    v_mul_lo_u16_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_2_i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u16 v1, v1, -2
+; GFX10-NEXT:    v_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub i16 %y, 2
+  %mul = mul i16 %x, %sub
+  ret i16 %mul
+}
+
+define i64 @v_mul_add_1_i64(i64 %x, i64 %y) {
+; GFX6-LABEL: v_mul_add_1_i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 1, v2
+; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; GFX6-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GFX6-NEXT:    v_mul_hi_u32 v4, v0, v2
+; GFX6-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GFX6-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v3
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_mul_add_1_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, 1, v2
+; GFX7-NEXT:    v_mov_b32_e32 v4, v1
+; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX7-NEXT:    v_mul_lo_u32 v3, v0, v1
+; GFX7-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v2, 0
+; GFX7-NEXT:    v_mul_lo_u32 v2, v4, v2
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 1, v2
+; GFX8-NEXT:    v_mov_b32_e32 v4, v1
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX8-NEXT:    v_mul_lo_u32 v3, v0, v1
+; GFX8-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v2, 0
+; GFX8-NEXT:    v_mul_lo_u32 v2, v4, v2
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, 1, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT:    v_mul_lo_u32 v4, v1, v2
+; GFX9-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v2, 0
+; GFX9-NEXT:    v_add3_u32 v1, v1, v3, v4
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i64:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_co_u32 v2, vcc_lo, v2, 1
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
+; GFX10-NEXT:    v_mul_lo_u32 v4, v1, v2
+; GFX10-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], null, v0, v2, 0
+; GFX10-NEXT:    v_add3_u32 v1, v1, v3, v4
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i64 %y, 1
+  %mul = mul i64 %x, %add
+  ret i64 %mul
+}
+
+define i64 @v_mul_add_1_i64_commute(i64 %x, i64 %y) {
+; GFX6-LABEL: v_mul_add_1_i64_commute:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 1, v2
+; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; GFX6-NEXT:    v_mul_lo_u32 v1, v2, v1
+; GFX6-NEXT:    v_mul_hi_u32 v4, v2, v0
+; GFX6-NEXT:    v_mul_lo_u32 v3, v3, v0
+; GFX6-NEXT:    v_mul_lo_u32 v0, v2, v0
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v4, v1
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_mul_add_1_i64_commute:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v4, v0
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
+; GFX7-NEXT:    v_addc_u32_e32 v2, vcc, 0, v3, vcc
+; GFX7-NEXT:    v_mul_lo_u32 v3, v0, v1
+; GFX7-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v4, 0
+; GFX7-NEXT:    v_mul_lo_u32 v2, v2, v4
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i64_commute:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v4, v0
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 1, v2
+; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, 0, v3, vcc
+; GFX8-NEXT:    v_mul_lo_u32 v3, v0, v1
+; GFX8-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v4, 0
+; GFX8-NEXT:    v_mul_lo_u32 v2, v2, v4
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i64_commute:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, 1, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT:    v_mul_lo_u32 v3, v3, v0
+; GFX9-NEXT:    v_mul_lo_u32 v4, v2, v1
+; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v2, v0, 0
+; GFX9-NEXT:    v_add3_u32 v1, v1, v4, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i64_commute:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_co_u32 v2, vcc_lo, v2, 1
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
+; GFX10-NEXT:    v_mul_lo_u32 v4, v2, v1
+; GFX10-NEXT:    v_mul_lo_u32 v3, v3, v0
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], null, v2, v0, 0
+; GFX10-NEXT:    v_add3_u32 v1, v1, v4, v3
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i64 %y, 1
+  %mul = mul i64 %add, %x
+  ret i64 %mul
+}
+
+define i64 @v_mul_add_x_i64(i64 %x, i64 %y) {
+; GFX6-LABEL: v_mul_add_x_i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GFX6-NEXT:    v_mul_hi_u32 v4, v0, v2
+; GFX6-NEXT:    v_mul_lo_u32 v5, v1, v2
+; GFX6-NEXT:    v_mul_lo_u32 v2, v0, v2
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_mul_add_x_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v0, v2, v[0:1]
+; GFX7-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GFX7-NEXT:    v_mul_lo_u32 v0, v0, v3
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v5
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v0, v1
+; GFX7-NEXT:    v_mov_b32_e32 v0, v4
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_x_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v0, v2, v[0:1]
+; GFX8-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v3
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v5
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v0, v1
+; GFX8-NEXT:    v_mov_b32_e32 v0, v4
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_x_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v0, v2, v[0:1]
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v3
+; GFX9-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GFX9-NEXT:    v_add3_u32 v1, v1, v5, v0
+; GFX9-NEXT:    v_mov_b32_e32 v0, v4
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_x_i64:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mad_u64_u32 v[4:5], null, v0, v2, v[0:1]
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v3
+; GFX10-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GFX10-NEXT:    v_add3_u32 v1, v1, v5, v0
+; GFX10-NEXT:    v_mov_b32_e32 v0, v4
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i64 %x, %y
+  %add = add i64 %x, %mul
+  ret i64 %add
+}
+
+define i64 @v_mul_sub_1_i64(i64 %x, i64 %y) {
+; GFX6-LABEL: v_mul_sub_1_i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, -1, v3, vcc
+; GFX6-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GFX6-NEXT:    v_mul_hi_u32 v4, v0, v2
+; GFX6-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GFX6-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v3
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_mul_sub_1_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GFX7-NEXT:    v_mov_b32_e32 v4, v1
+; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, -1, v3, vcc
+; GFX7-NEXT:    v_mul_lo_u32 v3, v0, v1
+; GFX7-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v2, 0
+; GFX7-NEXT:    v_mul_lo_u32 v2, v4, v2
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_1_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, -1, v2
+; GFX8-NEXT:    v_mov_b32_e32 v4, v1
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, -1, v3, vcc
+; GFX8-NEXT:    v_mul_lo_u32 v3, v0, v1
+; GFX8-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v2, 0
+; GFX8-NEXT:    v_mul_lo_u32 v2, v4, v2
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_1_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, -1, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v3, vcc
+; GFX9-NEXT:    v_mul_lo_u32 v4, v1, v2
+; GFX9-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v2, 0
+; GFX9-NEXT:    v_add3_u32 v1, v1, v3, v4
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_1_i64:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_co_u32 v2, vcc_lo, v2, -1
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, -1, v3, vcc_lo
+; GFX10-NEXT:    v_mul_lo_u32 v4, v1, v2
+; GFX10-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], null, v0, v2, 0
+; GFX10-NEXT:    v_add3_u32 v1, v1, v3, v4
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub i64 %y, 1
+  %mul = mul i64 %x, %sub
+  ret i64 %mul
+}
+
+define i64 @v_mul_sub_1_i64_commute(i64 %x, i64 %y) {
+; GFX6-LABEL: v_mul_sub_1_i64_commute:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, -1, v3, vcc
+; GFX6-NEXT:    v_mul_lo_u32 v1, v2, v1
+; GFX6-NEXT:    v_mul_hi_u32 v4, v2, v0
+; GFX6-NEXT:    v_mul_lo_u32 v3, v3, v0
+; GFX6-NEXT:    v_mul_lo_u32 v0, v2, v0
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v4, v1
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_mul_sub_1_i64_commute:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v4, v0
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, -1, v2
+; GFX7-NEXT:    v_addc_u32_e32 v2, vcc, -1, v3, vcc
+; GFX7-NEXT:    v_mul_lo_u32 v3, v0, v1
+; GFX7-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v4, 0
+; GFX7-NEXT:    v_mul_lo_u32 v2, v2, v4
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_1_i64_commute:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v4, v0
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, -1, v2
+; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, -1, v3, vcc
+; GFX8-NEXT:    v_mul_lo_u32 v3, v0, v1
+; GFX8-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v4, 0
+; GFX8-NEXT:    v_mul_lo_u32 v2, v2, v4
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_1_i64_commute:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, -1, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v3, vcc
+; GFX9-NEXT:    v_mul_lo_u32 v3, v3, v0
+; GFX9-NEXT:    v_mul_lo_u32 v4, v2, v1
+; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v2, v0, 0
+; GFX9-NEXT:    v_add3_u32 v1, v1, v4, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_1_i64_commute:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_co_u32 v2, vcc_lo, v2, -1
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, -1, v3, vcc_lo
+; GFX10-NEXT:    v_mul_lo_u32 v4, v2, v1
+; GFX10-NEXT:    v_mul_lo_u32 v3, v3, v0
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], null, v2, v0, 0
+; GFX10-NEXT:    v_add3_u32 v1, v1, v4, v3
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub i64 %y, 1
+  %mul = mul i64 %sub, %x
+  ret i64 %mul
+}
+
+define i64 @v_mul_sub_x_i64(i64 %x, i64 %y) {
+; GFX6-LABEL: v_mul_sub_x_i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GFX6-NEXT:    v_mul_hi_u32 v4, v0, v2
+; GFX6-NEXT:    v_mul_lo_u32 v5, v1, v2
+; GFX6-NEXT:    v_mul_lo_u32 v2, v0, v2
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
+; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, v2, v0
+; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v3, v1, vcc
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_mul_sub_x_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_mul_lo_u32 v5, v0, v3
+; GFX7-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], v0, v2, 0
+; GFX7-NEXT:    v_mul_lo_u32 v2, v1, v2
+; GFX7-NEXT:    v_add_i32_e32 v4, vcc, v4, v5
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
+; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v3, v0
+; GFX7-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_x_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mul_lo_u32 v5, v0, v3
+; GFX8-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], v0, v2, 0
+; GFX8-NEXT:    v_mul_lo_u32 v2, v1, v2
+; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v5
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v4, v2
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v3, v0
+; GFX8-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_x_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mul_lo_u32 v4, v1, v2
+; GFX9-NEXT:    v_mul_lo_u32 v5, v0, v3
+; GFX9-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v0, v2, 0
+; GFX9-NEXT:    v_add3_u32 v3, v3, v5, v4
+; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v0
+; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_x_i64:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mul_lo_u32 v4, v1, v2
+; GFX10-NEXT:    v_mul_lo_u32 v5, v0, v3
+; GFX10-NEXT:    v_mad_u64_u32 v[2:3], null, v0, v2, 0
+; GFX10-NEXT:    v_add3_u32 v3, v3, v5, v4
+; GFX10-NEXT:    v_sub_co_u32 v0, vcc_lo, v2, v0
+; GFX10-NEXT:    v_sub_co_ci_u32_e32 v1, vcc_lo, v3, v1, vcc_lo
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i64 %x, %y
+  %sub = sub i64 %mul, %x
+  ret i64 %sub
+}
+
+define i64 @v_mul_add_2_i64(i64 %x, i64 %y) {
+; GFX6-LABEL: v_mul_add_2_i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 2, v2
+; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; GFX6-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GFX6-NEXT:    v_mul_hi_u32 v4, v0, v2
+; GFX6-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GFX6-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v3
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_mul_add_2_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, 2, v2
+; GFX7-NEXT:    v_mov_b32_e32 v4, v1
+; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX7-NEXT:    v_mul_lo_u32 v3, v0, v1
+; GFX7-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v2, 0
+; GFX7-NEXT:    v_mul_lo_u32 v2, v4, v2
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_2_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 2, v2
+; GFX8-NEXT:    v_mov_b32_e32 v4, v1
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX8-NEXT:    v_mul_lo_u32 v3, v0, v1
+; GFX8-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v2, 0
+; GFX8-NEXT:    v_mul_lo_u32 v2, v4, v2
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_2_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, 2, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT:    v_mul_lo_u32 v4, v1, v2
+; GFX9-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v2, 0
+; GFX9-NEXT:    v_add3_u32 v1, v1, v3, v4
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_2_i64:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_co_u32 v2, vcc_lo, v2, 2
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
+; GFX10-NEXT:    v_mul_lo_u32 v4, v1, v2
+; GFX10-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], null, v0, v2, 0
+; GFX10-NEXT:    v_add3_u32 v1, v1, v3, v4
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i64 %y, 2
+  %mul = mul i64 %x, %add
+  ret i64 %mul
+}
+
+define i64 @v_mul_sub_2_i64(i64 %x, i64 %y) {
+; GFX6-LABEL: v_mul_sub_2_i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, -2, v2
+; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, -1, v3, vcc
+; GFX6-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GFX6-NEXT:    v_mul_hi_u32 v4, v0, v2
+; GFX6-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GFX6-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v3
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_mul_sub_2_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v2, vcc, -2, v2
+; GFX7-NEXT:    v_mov_b32_e32 v4, v1
+; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, -1, v3, vcc
+; GFX7-NEXT:    v_mul_lo_u32 v3, v0, v1
+; GFX7-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v2, 0
+; GFX7-NEXT:    v_mul_lo_u32 v2, v4, v2
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_2_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, -2, v2
+; GFX8-NEXT:    v_mov_b32_e32 v4, v1
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, -1, v3, vcc
+; GFX8-NEXT:    v_mul_lo_u32 v3, v0, v1
+; GFX8-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v2, 0
+; GFX8-NEXT:    v_mul_lo_u32 v2, v4, v2
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_2_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, -2, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v3, vcc
+; GFX9-NEXT:    v_mul_lo_u32 v4, v1, v2
+; GFX9-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v2, 0
+; GFX9-NEXT:    v_add3_u32 v1, v1, v3, v4
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_2_i64:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_co_u32 v2, vcc_lo, v2, -2
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, -1, v3, vcc_lo
+; GFX10-NEXT:    v_mul_lo_u32 v4, v1, v2
+; GFX10-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], null, v0, v2, 0
+; GFX10-NEXT:    v_add3_u32 v1, v1, v3, v4
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub i64 %y, 2
+  %mul = mul i64 %x, %sub
+  ret i64 %mul
+}
+
+define <2 x i32> @v_mul_add_1_i32_multiple(i32 %x, i32 %y, i32 %z) {
+; GFX67-LABEL: v_mul_add_1_i32_multiple:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX67-NEXT:    v_mul_lo_u32 v1, v2, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i32_multiple:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 1, v1
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT:    v_mul_lo_u32 v1, v2, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i32_multiple:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, 1, v1
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT:    v_mul_lo_u32 v1, v2, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i32_multiple:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, 1, v1
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT:    v_mul_lo_u32 v1, v2, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i32 %y, 1
+  %mul0 = mul i32 %x, %add
+  %mul1 = mul i32 %z, %add
+  %insert.0 = insertelement <2 x i32> poison, i32 %mul0, i32 0
+  %insert.1 = insertelement <2 x i32> %insert.0, i32 %mul1, i32 1
+  ret <2 x i32> %insert.1
+}
+
+define <2 x i32> @v_mul_add_1_i32_other_use(i32 %x, i32 %y, i32 %z) {
+; GFX67-LABEL: v_mul_add_1_i32_other_use:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i32_other_use:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 1, v1
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i32_other_use:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, 1, v1
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i32_other_use:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, 1, v1
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i32 %y, 1
+  %mul0 = mul i32 %x, %add
+  %mul1 = mul i32 %z, %add
+  %insert.0 = insertelement <2 x i32> poison, i32 %mul0, i32 0
+  %insert.1 = insertelement <2 x i32> %insert.0, i32 %add, i32 1
+  ret <2 x i32> %insert.1
+}
+
+define i32 @v_mul_add_1_i32_chain(i32 %arg0, i32 %arg1, i32 %arg2) {
+; GFX67-LABEL: v_mul_add_1_i32_chain:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
+; GFX67-NEXT:    v_mul_lo_u32 v1, v2, v1
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, v1, v2
+; GFX67-NEXT:    v_mul_lo_u32 v0, v2, v0
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i32_chain:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 1, v0
+; GFX8-NEXT:    v_mul_lo_u32 v1, v2, v1
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v1, v2
+; GFX8-NEXT:    v_mul_lo_u32 v0, v2, v0
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 1, v1
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i32_chain:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v2, 1, v0
+; GFX9-NEXT:    v_mul_lo_u32 v1, v2, v1
+; GFX9-NEXT:    v_add_u32_e32 v2, v1, v2
+; GFX9-NEXT:    v_mul_lo_u32 v0, v2, v0
+; GFX9-NEXT:    v_add_u32_e32 v1, 1, v1
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i32_chain:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v2, 1, v0
+; GFX10-NEXT:    v_mul_lo_u32 v1, v2, v1
+; GFX10-NEXT:    v_add_nc_u32_e32 v2, v1, v2
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, 1, v1
+; GFX10-NEXT:    v_mul_lo_u32 v0, v2, v0
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %i2 = add i32 %arg0, 1
+  %i3 = mul i32 %i2, %arg1
+  %i4 = add i32 %i3, %i2
+  %i5 = mul i32 %i4, %arg0
+  %i6 = add i32 %i3, 1
+  %i7 = mul i32 %i5, %i6
+  ret i32 %i7
+}
+define <2 x i16> @v_mul_add_1_v2i16(<2 x i16> %x, <2 x i16> %y) {
+; GFX67-LABEL: v_mul_add_1_v2i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, 1, v2
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, 0x10000, v3
+; GFX67-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v3, 1
+; GFX8-NEXT:    v_add_u16_e32 v2, 1, v1
+; GFX8-NEXT:    v_add_u16_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_mul_lo_u16_sdwa v1, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v0, v2
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_sub_u16 v1, v1, -1 op_sel_hi:[1,0]
+; GFX9-NEXT:    v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_v2i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_pk_sub_u16 v1, v1, -1 op_sel_hi:[1,0]
+; GFX10-NEXT:    v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add <2 x i16> %y, <i16 1, i16 1>
+  %mul = mul <2 x i16> %x, %add
+  ret <2 x i16> %mul
+}
+
+define <2 x i16> @v_mul_add_1_v2i16_commute(<2 x i16> %x, <2 x i16> %y) {
+; GFX67-LABEL: v_mul_add_1_v2i16_commute:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, 1, v2
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, 0x10000, v3
+; GFX67-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; GFX67-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v2, v0
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, v3, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_v2i16_commute:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v3, 1
+; GFX8-NEXT:    v_add_u16_e32 v2, 1, v1
+; GFX8-NEXT:    v_add_u16_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_mul_lo_u16_sdwa v1, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v2, v0
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_v2i16_commute:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_sub_u16 v1, v1, -1 op_sel_hi:[1,0]
+; GFX9-NEXT:    v_pk_mul_lo_u16 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_v2i16_commute:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_pk_sub_u16 v1, v1, -1 op_sel_hi:[1,0]
+; GFX10-NEXT:    v_pk_mul_lo_u16 v0, v1, v0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add <2 x i16> %y, <i16 1, i16 1>
+  %mul = mul <2 x i16> %add, %x
+  ret <2 x i16> %mul
+}
+
+define <2 x i16> @v_mul_add_x_v2i16(<2 x i16> %x, <2 x i16> %y) {
+; GFX67-LABEL: v_mul_add_x_v2i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_and_b32_e32 v4, 0xffff, v0
+; GFX67-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; GFX67-NEXT:    v_and_b32_e32 v5, 0xffff, v1
+; GFX67-NEXT:    v_and_b32_e32 v3, 0xffff, v3
+; GFX67-NEXT:    v_mad_u32_u24 v1, v5, v3, v1
+; GFX67-NEXT:    v_mad_u32_u24 v0, v4, v2, v0
+; GFX67-NEXT:    v_lshlrev_b32_e32 v3, 16, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_or_b32_e32 v0, v0, v3
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_x_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 16, v1
+; GFX8-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
+; GFX8-NEXT:    v_mad_u16 v2, v3, v2, v3
+; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT:    v_mad_u16 v0, v0, v1, v0
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_x_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_mul_lo_u16 v1, v0, v1
+; GFX9-NEXT:    v_pk_add_u16 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_x_v2i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_pk_mul_lo_u16 v1, v0, v1
+; GFX10-NEXT:    v_pk_add_u16 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul <2 x i16> %x, %y
+  %add = add <2 x i16> %x, %mul
+  ret <2 x i16> %add
+}
+
+define <2 x i16> @v_mul_sub_1_v2i16(<2 x i16> %x, <2 x i16> %y) {
+; GFX67-LABEL: v_mul_sub_1_v2i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, 0xffff0000, v3
+; GFX67-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_1_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v3, -1
+; GFX8-NEXT:    v_add_u16_e32 v2, -1, v1
+; GFX8-NEXT:    v_add_u16_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_mul_lo_u16_sdwa v1, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v0, v2
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_1_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_sub_i16 v1, v1, 1 op_sel_hi:[1,0]
+; GFX9-NEXT:    v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_1_v2i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_pk_sub_i16 v1, v1, 1 op_sel_hi:[1,0]
+; GFX10-NEXT:    v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub <2 x i16> %y, <i16 1, i16 1>
+  %mul = mul <2 x i16> %x, %sub
+  ret <2 x i16> %mul
+}
+
+define <2 x i16> @v_mul_sub_1_v2i16_commute(<2 x i16> %x, <2 x i16> %y) {
+; GFX67-LABEL: v_mul_sub_1_v2i16_commute:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, 0xffff0000, v3
+; GFX67-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; GFX67-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v2, v0
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, v3, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_1_v2i16_commute:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v3, -1
+; GFX8-NEXT:    v_add_u16_e32 v2, -1, v1
+; GFX8-NEXT:    v_add_u16_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_mul_lo_u16_sdwa v1, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v2, v0
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_1_v2i16_commute:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_sub_i16 v1, v1, 1 op_sel_hi:[1,0]
+; GFX9-NEXT:    v_pk_mul_lo_u16 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_1_v2i16_commute:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_pk_sub_i16 v1, v1, 1 op_sel_hi:[1,0]
+; GFX10-NEXT:    v_pk_mul_lo_u16 v0, v1, v0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub <2 x i16> %y, <i16 1, i16 1>
+  %mul = mul <2 x i16> %sub, %x
+  ret <2 x i16> %mul
+}
+
+define <2 x i16> @v_mul_sub_x_v2i16(<2 x i16> %x, <2 x i16> %y) {
+; GFX67-LABEL: v_mul_sub_x_v2i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_and_b32_e32 v4, 0xffff, v0
+; GFX67-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v2, v4, v2
+; GFX67-NEXT:    v_and_b32_e32 v4, 0xffff, v1
+; GFX67-NEXT:    v_and_b32_e32 v3, 0xffff, v3
+; GFX67-NEXT:    v_mul_u32_u24_e32 v3, v4, v3
+; GFX67-NEXT:    v_sub_i32_e32 v1, vcc, v3, v1
+; GFX67-NEXT:    v_sub_i32_e32 v0, vcc, v2, v0
+; GFX67-NEXT:    v_lshlrev_b32_e32 v3, 16, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_or_b32_e32 v0, v0, v3
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_x_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
+; GFX8-NEXT:    v_mul_lo_u16_sdwa v3, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_mul_lo_u16_e32 v1, v0, v1
+; GFX8-NEXT:    v_sub_u16_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT:    v_sub_u16_e32 v0, v1, v0
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_x_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_mul_lo_u16 v1, v0, v1
+; GFX9-NEXT:    v_pk_sub_i16 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_x_v2i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_pk_mul_lo_u16 v1, v0, v1
+; GFX10-NEXT:    v_pk_sub_i16 v0, v1, v0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul <2 x i16> %x, %y
+  %sub = sub <2 x i16> %mul, %x
+  ret <2 x i16> %sub
+}
+
+define <2 x i16> @v_mul_add_2_v2i16(<2 x i16> %x, <2 x i16> %y) {
+; GFX67-LABEL: v_mul_add_2_v2i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, 2, v2
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, 0x20000, v3
+; GFX67-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_2_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v3, 2
+; GFX8-NEXT:    v_add_u16_e32 v2, 2, v1
+; GFX8-NEXT:    v_add_u16_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_mul_lo_u16_sdwa v1, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v0, v2
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_2_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_sub_u16 v1, v1, -2 op_sel_hi:[1,0]
+; GFX9-NEXT:    v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_2_v2i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_pk_sub_u16 v1, v1, -2 op_sel_hi:[1,0]
+; GFX10-NEXT:    v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add <2 x i16> %y, <i16 2, i16 2>
+  %mul = mul <2 x i16> %x, %add
+  ret <2 x i16> %mul
+}
+
+define <2 x i16> @v_mul_sub_2_v2i16(<2 x i16> %x, <2 x i16> %y) {
+; GFX67-LABEL: v_mul_sub_2_v2i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, -2, v2
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, 0xfffe0000, v3
+; GFX67-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_2_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v3, -2
+; GFX8-NEXT:    v_add_u16_e32 v2, -2, v1
+; GFX8-NEXT:    v_add_u16_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_mul_lo_u16_sdwa v1, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v0, v2
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_2_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_sub_i16 v1, v1, 2 op_sel_hi:[1,0]
+; GFX9-NEXT:    v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_2_v2i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_pk_sub_i16 v1, v1, 2 op_sel_hi:[1,0]
+; GFX10-NEXT:    v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub <2 x i16> %y, <i16 2, i16 2>
+  %mul = mul <2 x i16> %x, %sub
+  ret <2 x i16> %mul
+}
+
+define <2 x i32> @v_mul_add_1_v2i32(<2 x i32> %x, <2 x i32> %y) {
+; GFX67-LABEL: v_mul_add_1_v2i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, 1, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, 1, v2
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX67-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_v2i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 1, v3
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 1, v2
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX8-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_v2i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v3, 1, v3
+; GFX9-NEXT:    v_add_u32_e32 v2, 1, v2
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX9-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_v2i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v2, 1, v2
+; GFX10-NEXT:    v_add_nc_u32_e32 v3, 1, v3
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX10-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add <2 x i32> %y, <i32 1, i32 1>
+  %mul = mul <2 x i32> %x, %add
+  ret <2 x i32> %mul
+}
+
+define <2 x i32> @v_mul_add_1_v2i32_commute(<2 x i32> %x, <2 x i32> %y) {
+; GFX67-LABEL: v_mul_add_1_v2i32_commute:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, 1, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, 1, v2
+; GFX67-NEXT:    v_mul_lo_u32 v0, v2, v0
+; GFX67-NEXT:    v_mul_lo_u32 v1, v3, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_v2i32_commute:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 1, v3
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 1, v2
+; GFX8-NEXT:    v_mul_lo_u32 v0, v2, v0
+; GFX8-NEXT:    v_mul_lo_u32 v1, v3, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_v2i32_commute:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v3, 1, v3
+; GFX9-NEXT:    v_add_u32_e32 v2, 1, v2
+; GFX9-NEXT:    v_mul_lo_u32 v0, v2, v0
+; GFX9-NEXT:    v_mul_lo_u32 v1, v3, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_v2i32_commute:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v2, 1, v2
+; GFX10-NEXT:    v_add_nc_u32_e32 v3, 1, v3
+; GFX10-NEXT:    v_mul_lo_u32 v0, v2, v0
+; GFX10-NEXT:    v_mul_lo_u32 v1, v3, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add <2 x i32> %y, <i32 1, i32 1>
+  %mul = mul <2 x i32> %add, %x
+  ret <2 x i32> %mul
+}
+
+define <2 x i32> @v_mul_add_x_v2i32(<2 x i32> %x, <2 x i32> %y) {
+; GFX67-LABEL: v_mul_add_x_v2i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_mul_lo_u32 v2, v0, v2
+; GFX67-NEXT:    v_mul_lo_u32 v3, v1, v3
+; GFX67-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_x_v2i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mul_lo_u32 v2, v0, v2
+; GFX8-NEXT:    v_mul_lo_u32 v3, v1, v3
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX900-LABEL: v_mul_add_x_v2i32:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX900-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v0, v2, v[0:1]
+; GFX900-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], v1, v3, v[1:2]
+; GFX900-NEXT:    v_mov_b32_e32 v0, v4
+; GFX900-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: v_mul_add_x_v2i32:
+; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT:    v_mov_b32_e32 v4, v1
+; GFX90A-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v2, v[0:1]
+; GFX90A-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v4, v3, v[4:5]
+; GFX90A-NEXT:    v_mov_b32_e32 v1, v2
+; GFX90A-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_x_v2i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mad_u64_u32 v[4:5], null, v0, v2, v[0:1]
+; GFX10-NEXT:    v_mad_u64_u32 v[1:2], null, v1, v3, v[1:2]
+; GFX10-NEXT:    v_mov_b32_e32 v0, v4
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul <2 x i32> %x, %y
+  %add = add <2 x i32> %x, %mul
+  ret <2 x i32> %add
+}
+
+define <2 x i32> @v_mul_sub_1_v2i32(<2 x i32> %x, <2 x i32> %y) {
+; GFX67-LABEL: v_mul_sub_1_v2i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, -1, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX67-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_1_v2i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, -1, v3
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, -1, v2
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX8-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_1_v2i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v3, -1, v3
+; GFX9-NEXT:    v_add_u32_e32 v2, -1, v2
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX9-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_1_v2i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v2, -1, v2
+; GFX10-NEXT:    v_add_nc_u32_e32 v3, -1, v3
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX10-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub <2 x i32> %y, <i32 1, i32 1>
+  %mul = mul <2 x i32> %x, %sub
+  ret <2 x i32> %mul
+}
+
+define <2 x i32> @v_mul_sub_1_v2i32_commute(<2 x i32> %x, <2 x i32> %y) {
+; GFX67-LABEL: v_mul_sub_1_v2i32_commute:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, -1, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GFX67-NEXT:    v_mul_lo_u32 v0, v2, v0
+; GFX67-NEXT:    v_mul_lo_u32 v1, v3, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_1_v2i32_commute:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, -1, v3
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, -1, v2
+; GFX8-NEXT:    v_mul_lo_u32 v0, v2, v0
+; GFX8-NEXT:    v_mul_lo_u32 v1, v3, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_1_v2i32_commute:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v3, -1, v3
+; GFX9-NEXT:    v_add_u32_e32 v2, -1, v2
+; GFX9-NEXT:    v_mul_lo_u32 v0, v2, v0
+; GFX9-NEXT:    v_mul_lo_u32 v1, v3, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_1_v2i32_commute:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v2, -1, v2
+; GFX10-NEXT:    v_add_nc_u32_e32 v3, -1, v3
+; GFX10-NEXT:    v_mul_lo_u32 v0, v2, v0
+; GFX10-NEXT:    v_mul_lo_u32 v1, v3, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub <2 x i32> %y, <i32 1, i32 1>
+  %mul = mul <2 x i32> %sub, %x
+  ret <2 x i32> %mul
+}
+
+define <2 x i32> @v_mul_sub_x_v2i32(<2 x i32> %x, <2 x i32> %y) {
+; GFX67-LABEL: v_mul_sub_x_v2i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_mul_lo_u32 v2, v0, v2
+; GFX67-NEXT:    v_mul_lo_u32 v3, v1, v3
+; GFX67-NEXT:    v_sub_i32_e32 v0, vcc, v2, v0
+; GFX67-NEXT:    v_sub_i32_e32 v1, vcc, v3, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_x_v2i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mul_lo_u32 v2, v0, v2
+; GFX8-NEXT:    v_mul_lo_u32 v3, v1, v3
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v2, v0
+; GFX8-NEXT:    v_sub_u32_e32 v1, vcc, v3, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX900-LABEL: v_mul_sub_x_v2i32:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX900-NEXT:    v_mul_lo_u32 v2, v0, v2
+; GFX900-NEXT:    v_mul_lo_u32 v3, v1, v3
+; GFX900-NEXT:    v_sub_u32_e32 v0, v2, v0
+; GFX900-NEXT:    v_sub_u32_e32 v1, v3, v1
+; GFX900-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: v_mul_sub_x_v2i32:
+; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT:    v_mul_lo_u32 v3, v1, v3
+; GFX90A-NEXT:    v_mul_lo_u32 v2, v0, v2
+; GFX90A-NEXT:    v_sub_u32_e32 v0, v2, v0
+; GFX90A-NEXT:    v_sub_u32_e32 v1, v3, v1
+; GFX90A-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_x_v2i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mul_lo_u32 v2, v0, v2
+; GFX10-NEXT:    v_mul_lo_u32 v3, v1, v3
+; GFX10-NEXT:    v_sub_nc_u32_e32 v0, v2, v0
+; GFX10-NEXT:    v_sub_nc_u32_e32 v1, v3, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul <2 x i32> %x, %y
+  %sub = sub <2 x i32> %mul, %x
+  ret <2 x i32> %sub
+}
+
+define <2 x i32> @v_mul_add_2_v2i32(<2 x i32> %x, <2 x i32> %y) {
+; GFX67-LABEL: v_mul_add_2_v2i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, 2, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, 2, v2
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX67-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_2_v2i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 2, v3
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 2, v2
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX8-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_2_v2i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v3, 2, v3
+; GFX9-NEXT:    v_add_u32_e32 v2, 2, v2
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX9-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_2_v2i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v2, 2, v2
+; GFX10-NEXT:    v_add_nc_u32_e32 v3, 2, v3
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX10-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add <2 x i32> %y, <i32 2, i32 2>
+  %mul = mul <2 x i32> %x, %add
+  ret <2 x i32> %mul
+}
+
+define <2 x i32> @v_mul_sub_2_v2i32(<2 x i32> %x, <2 x i32> %y) {
+; GFX67-LABEL: v_mul_sub_2_v2i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, -2, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, -2, v2
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX67-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_2_v2i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, -2, v3
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, -2, v2
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX8-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_2_v2i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v3, -2, v3
+; GFX9-NEXT:    v_add_u32_e32 v2, -2, v2
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX9-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_2_v2i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v2, -2, v2
+; GFX10-NEXT:    v_add_nc_u32_e32 v3, -2, v3
+; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GFX10-NEXT:    v_mul_lo_u32 v1, v1, v3
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub <2 x i32> %y, <i32 2, i32 2>
+  %mul = mul <2 x i32> %x, %sub
+  ret <2 x i32> %mul
+}
+
+define <2 x i24> @v_mul_add_1_v2i24(<2 x i24> %x, <2 x i24> %y) {
+; GFX67-LABEL: v_mul_add_1_v2i24:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, 1, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, 1, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_v2i24:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 1, v3
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 1, v2
+; GFX8-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX8-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_v2i24:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v3, 1, v3
+; GFX9-NEXT:    v_add_u32_e32 v2, 1, v2
+; GFX9-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX9-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_v2i24:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v2, 1, v2
+; GFX10-NEXT:    v_add_nc_u32_e32 v3, 1, v3
+; GFX10-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX10-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add <2 x i24> %y, <i24 1, i24 1>
+  %mul = mul <2 x i24> %x, %add
+  ret <2 x i24> %mul
+}
+
+define <2 x i24> @v_mul_add_1_v2i24_commute(<2 x i24> %x, <2 x i24> %y) {
+; GFX67-LABEL: v_mul_add_1_v2i24_commute:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, 1, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, 1, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v2, v0
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, v3, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_v2i24_commute:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 1, v3
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 1, v2
+; GFX8-NEXT:    v_mul_u32_u24_e32 v0, v2, v0
+; GFX8-NEXT:    v_mul_u32_u24_e32 v1, v3, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_v2i24_commute:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v3, 1, v3
+; GFX9-NEXT:    v_add_u32_e32 v2, 1, v2
+; GFX9-NEXT:    v_mul_u32_u24_e32 v0, v2, v0
+; GFX9-NEXT:    v_mul_u32_u24_e32 v1, v3, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_v2i24_commute:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v2, 1, v2
+; GFX10-NEXT:    v_add_nc_u32_e32 v3, 1, v3
+; GFX10-NEXT:    v_mul_u32_u24_e32 v0, v2, v0
+; GFX10-NEXT:    v_mul_u32_u24_e32 v1, v3, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add <2 x i24> %y, <i24 1, i24 1>
+  %mul = mul <2 x i24> %add, %x
+  ret <2 x i24> %mul
+}
+
+define <2 x i24> @v_mul_add_x_v2i24(<2 x i24> %x, <2 x i24> %y) {
+; GFX67-LABEL: v_mul_add_x_v2i24:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_mad_u32_u24 v0, v0, v2, v0
+; GFX67-NEXT:    v_mad_u32_u24 v1, v1, v3, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_x_v2i24:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mad_u32_u24 v0, v0, v2, v0
+; GFX8-NEXT:    v_mad_u32_u24 v1, v1, v3, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_x_v2i24:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mad_u32_u24 v0, v0, v2, v0
+; GFX9-NEXT:    v_mad_u32_u24 v1, v1, v3, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_x_v2i24:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mad_u32_u24 v0, v0, v2, v0
+; GFX10-NEXT:    v_mad_u32_u24 v1, v1, v3, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul <2 x i24> %x, %y
+  %add = add <2 x i24> %x, %mul
+  ret <2 x i24> %add
+}
+
+define <2 x i24> @v_mul_sub_1_v2i24(<2 x i24> %x, <2 x i24> %y) {
+; GFX67-LABEL: v_mul_sub_1_v2i24:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, -1, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_1_v2i24:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, -1, v3
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, -1, v2
+; GFX8-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX8-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_1_v2i24:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v3, -1, v3
+; GFX9-NEXT:    v_add_u32_e32 v2, -1, v2
+; GFX9-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX9-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_1_v2i24:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v2, -1, v2
+; GFX10-NEXT:    v_add_nc_u32_e32 v3, -1, v3
+; GFX10-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX10-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub <2 x i24> %y, <i24 1, i24 1>
+  %mul = mul <2 x i24> %x, %sub
+  ret <2 x i24> %mul
+}
+
+define <2 x i24> @v_mul_sub_1_v2i24_commute(<2 x i24> %x, <2 x i24> %y) {
+; GFX67-LABEL: v_mul_sub_1_v2i24_commute:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, -1, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v2, v0
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, v3, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_1_v2i24_commute:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, -1, v3
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, -1, v2
+; GFX8-NEXT:    v_mul_u32_u24_e32 v0, v2, v0
+; GFX8-NEXT:    v_mul_u32_u24_e32 v1, v3, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_1_v2i24_commute:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v3, -1, v3
+; GFX9-NEXT:    v_add_u32_e32 v2, -1, v2
+; GFX9-NEXT:    v_mul_u32_u24_e32 v0, v2, v0
+; GFX9-NEXT:    v_mul_u32_u24_e32 v1, v3, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_1_v2i24_commute:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v2, -1, v2
+; GFX10-NEXT:    v_add_nc_u32_e32 v3, -1, v3
+; GFX10-NEXT:    v_mul_u32_u24_e32 v0, v2, v0
+; GFX10-NEXT:    v_mul_u32_u24_e32 v1, v3, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub <2 x i24> %y, <i24 1, i24 1>
+  %mul = mul <2 x i24> %sub, %x
+  ret <2 x i24> %mul
+}
+
+define <2 x i24> @v_mul_sub_x_v2i24(<2 x i24> %x, <2 x i24> %y) {
+; GFX67-LABEL: v_mul_sub_x_v2i24:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_mul_u32_u24_e32 v2, v0, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v3, v1, v3
+; GFX67-NEXT:    v_sub_i32_e32 v0, vcc, v2, v0
+; GFX67-NEXT:    v_sub_i32_e32 v1, vcc, v3, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_x_v2i24:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mul_u32_u24_e32 v2, v0, v2
+; GFX8-NEXT:    v_mul_u32_u24_e32 v3, v1, v3
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v2, v0
+; GFX8-NEXT:    v_sub_u32_e32 v1, vcc, v3, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_x_v2i24:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mul_u32_u24_e32 v2, v0, v2
+; GFX9-NEXT:    v_mul_u32_u24_e32 v3, v1, v3
+; GFX9-NEXT:    v_sub_u32_e32 v0, v2, v0
+; GFX9-NEXT:    v_sub_u32_e32 v1, v3, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_x_v2i24:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mul_u32_u24_e32 v2, v0, v2
+; GFX10-NEXT:    v_mul_u32_u24_e32 v3, v1, v3
+; GFX10-NEXT:    v_sub_nc_u32_e32 v0, v2, v0
+; GFX10-NEXT:    v_sub_nc_u32_e32 v1, v3, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul <2 x i24> %x, %y
+  %sub = sub <2 x i24> %mul, %x
+  ret <2 x i24> %sub
+}
+
+define <2 x i24> @v_mul_add_2_v2i24(<2 x i24> %x, <2 x i24> %y) {
+; GFX67-LABEL: v_mul_add_2_v2i24:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, 2, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, 2, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_2_v2i24:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 2, v3
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 2, v2
+; GFX8-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX8-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_2_v2i24:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v3, 2, v3
+; GFX9-NEXT:    v_add_u32_e32 v2, 2, v2
+; GFX9-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX9-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_2_v2i24:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v2, 2, v2
+; GFX10-NEXT:    v_add_nc_u32_e32 v3, 2, v3
+; GFX10-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX10-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add <2 x i24> %y, <i24 2, i24 2>
+  %mul = mul <2 x i24> %x, %add
+  ret <2 x i24> %mul
+}
+
+define <2 x i24> @v_mul_sub_2_v2i24(<2 x i24> %x, <2 x i24> %y) {
+; GFX67-LABEL: v_mul_sub_2_v2i24:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v3, vcc, -2, v3
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, -2, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_sub_2_v2i24:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, -2, v3
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, -2, v2
+; GFX8-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX8-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_sub_2_v2i24:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v3, -2, v3
+; GFX9-NEXT:    v_add_u32_e32 v2, -2, v2
+; GFX9-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX9-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_sub_2_v2i24:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u32_e32 v2, -2, v2
+; GFX10-NEXT:    v_add_nc_u32_e32 v3, -2, v3
+; GFX10-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX10-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %sub = sub <2 x i24> %y, <i24 2, i24 2>
+  %mul = mul <2 x i24> %x, %sub
+  ret <2 x i24> %mul
+}
+
+define i32 @v_mul_9_add_52_i32(i32 %arg) {
+; GFX67-LABEL: v_mul_9_add_52_i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, 9
+; GFX67-NEXT:    v_add_i32_e32 v0, vcc, 52, v0
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_9_add_52_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, 9
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 52, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_9_add_52_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, 9, 52
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_9_add_52_i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], null, v0, 9, 52
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i32 %arg, 9
+  %add = add i32 %mul, 52
+  ret i32 %add
+}
+
+define i16 @v_mul_9_add_52_i16(i16 %arg) {
+; GFX67-LABEL: v_mul_9_add_52_i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_mad_u32_u24 v0, v0, 9, 52
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_9_add_52_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mad_u16 v0, v0, 9, 52
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_9_add_52_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mad_legacy_u16 v0, v0, 9, 52
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_9_add_52_i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mad_u16 v0, v0, 9, 52
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i16 %arg, 9
+  %add = add i16 %mul, 52
+  ret i16 %add
+}
+
+define <2 x i16> @v_mul_9_add_52_v2i16(<2 x i16> %arg) {
+; GFX67-LABEL: v_mul_9_add_52_v2i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, 9, v1
+; GFX67-NEXT:    v_mad_u32_u24 v0, v0, 9, 52
+; GFX67-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 0x340000, v1
+; GFX67-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX67-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_9_add_52_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; GFX8-NEXT:    v_mad_u16 v1, v1, 9, 52
+; GFX8-NEXT:    v_mad_u16 v0, v0, 9, 52
+; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_9_add_52_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_mul_lo_u16 v0, v0, 9 op_sel_hi:[1,0]
+; GFX9-NEXT:    v_pk_add_u16 v0, v0, 52 op_sel_hi:[1,0]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_9_add_52_v2i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_pk_mul_lo_u16 v0, v0, 9 op_sel_hi:[1,0]
+; GFX10-NEXT:    v_pk_add_u16 v0, v0, 52 op_sel_hi:[1,0]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul <2 x i16> %arg, <i16 9, i16 9>
+  %add = add <2 x i16> %mul, <i16 52, i16 52>
+  ret <2 x i16> %add
+}
+
+define i64 @v_mul_9_add_52_i64(i64 %arg) {
+; GFX6-LABEL: v_mul_9_add_52_i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_mul_lo_u32 v1, v1, 9
+; GFX6-NEXT:    v_mul_hi_u32 v2, v0, 9
+; GFX6-NEXT:    v_mul_lo_u32 v0, v0, 9
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, 52, v0
+; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_mul_9_add_52_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_mul_lo_u32 v2, v1, 9
+; GFX7-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, 9, 52
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_9_add_52_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mul_lo_u32 v2, v1, 9
+; GFX8-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, 9, 52
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX900-LABEL: v_mul_9_add_52_i64:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX900-NEXT:    v_mov_b32_e32 v2, v1
+; GFX900-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, 9, 52
+; GFX900-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], v2, 9, v[1:2]
+; GFX900-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: v_mul_9_add_52_i64:
+; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT:    v_mov_b32_e32 v2, v1
+; GFX90A-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, 9, 52
+; GFX90A-NEXT:    v_mov_b32_e32 v4, v1
+; GFX90A-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v2, 9, v[4:5]
+; GFX90A-NEXT:    v_mov_b32_e32 v1, v2
+; GFX90A-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_9_add_52_i64:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mov_b32_e32 v2, v1
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], null, v0, 9, 52
+; GFX10-NEXT:    v_mad_u64_u32 v[1:2], null, v2, 9, v[1:2]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i64 %arg, 9
+  %add = add i64 %mul, 52
+  ret i64 %add
+}
+
+define i32 @v_mul_5_add_1_i32(i32 %arg) {
+; GFX67-LABEL: v_mul_5_add_1_i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, 5
+; GFX67-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_5_add_1_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, 5
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_5_add_1_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, 5, 1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_5_add_1_i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], null, v0, 5, 1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i32 %arg, 5
+  %add = add i32 %mul, 1
+  ret i32 %add
+}
+
+define i32 @v_mul_284_add_82_i32(i32 %arg) {
+; GFX67-LABEL: v_mul_284_add_82_i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    s_movk_i32 s4, 0x11c
+; GFX67-NEXT:    v_mul_lo_u32 v0, v0, s4
+; GFX67-NEXT:    v_add_i32_e32 v0, vcc, 0x52, v0
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_284_add_82_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_movk_i32 s4, 0x11c
+; GFX8-NEXT:    v_mul_lo_u32 v0, v0, s4
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 0x52, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_284_add_82_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_movk_i32 s4, 0x11c
+; GFX9-NEXT:    v_mul_lo_u32 v0, v0, s4
+; GFX9-NEXT:    v_add_u32_e32 v0, 0x52, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_284_add_82_i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    s_movk_i32 s4, 0x11c
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], null, v0, s4, 0x52
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i32 %arg, 284
+  %add = add i32 %mul, 82
+  ret i32 %add
+}
+
+define i16 @v_mul_5_add_1_i16(i16 %arg) {
+; GFX67-LABEL: v_mul_5_add_1_i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_mad_u32_u24 v0, v0, 5, 1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_5_add_1_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mad_u16 v0, v0, 5, 1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_5_add_1_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mad_legacy_u16 v0, v0, 5, 1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_5_add_1_i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mad_u16 v0, v0, 5, 1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i16 %arg, 5
+  %add = add i16 %mul, 1
+  ret i16 %add
+}
+
+define i16 @v_mul_284_add_82_i16(i16 %arg) {
+; GFX67-LABEL: v_mul_284_add_82_i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    s_movk_i32 s4, 0x11c
+; GFX67-NEXT:    v_mov_b32_e32 v1, 0x52
+; GFX67-NEXT:    v_mad_u32_u24 v0, v0, s4, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_284_add_82_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_movk_i32 s4, 0x11c
+; GFX8-NEXT:    v_mov_b32_e32 v1, 0x52
+; GFX8-NEXT:    v_mad_u16 v0, v0, s4, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_284_add_82_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_movk_i32 s4, 0x11c
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0x52
+; GFX9-NEXT:    v_mad_legacy_u16 v0, v0, s4, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_284_add_82_i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    s_movk_i32 s4, 0x11c
+; GFX10-NEXT:    v_mad_u16 v0, v0, s4, 0x52
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i16 %arg, 284
+  %add = add i16 %mul, 82
+  ret i16 %add
+}
+
+define <2 x i16> @v_mul_5_add_1_v2i16(<2 x i16> %arg) {
+; GFX67-LABEL: v_mul_5_add_1_v2i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, 5, v1
+; GFX67-NEXT:    v_mad_u32_u24 v0, v0, 5, 1
+; GFX67-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 0x10000, v1
+; GFX67-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX67-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_5_add_1_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; GFX8-NEXT:    v_mad_u16 v1, v1, 5, 1
+; GFX8-NEXT:    v_mad_u16 v0, v0, 5, 1
+; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_5_add_1_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_mul_lo_u16 v0, v0, 5 op_sel_hi:[1,0]
+; GFX9-NEXT:    v_pk_sub_u16 v0, v0, -1 op_sel_hi:[1,0]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_5_add_1_v2i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_pk_mul_lo_u16 v0, v0, 5 op_sel_hi:[1,0]
+; GFX10-NEXT:    v_pk_sub_u16 v0, v0, -1 op_sel_hi:[1,0]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul <2 x i16> %arg, <i16 5, i16 5>
+  %add = add <2 x i16> %mul, <i16 1, i16 1>
+  ret <2 x i16> %add
+}
+
+define <2 x i16> @v_mul_284_add_82_v2i16(<2 x i16> %arg) {
+; GFX67-LABEL: v_mul_284_add_82_v2i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX67-NEXT:    s_movk_i32 s4, 0x11c
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, 0x11c, v1
+; GFX67-NEXT:    v_mov_b32_e32 v2, 0x52
+; GFX67-NEXT:    v_mad_u32_u24 v0, v0, s4, v2
+; GFX67-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xfffe, v0
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 0x520000, v1
+; GFX67-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX67-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_284_add_82_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; GFX8-NEXT:    s_movk_i32 s4, 0x11c
+; GFX8-NEXT:    v_mov_b32_e32 v2, 0x52
+; GFX8-NEXT:    v_mad_u16 v1, v1, s4, v2
+; GFX8-NEXT:    v_mad_u16 v0, v0, s4, v2
+; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_284_add_82_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_movk_i32 s4, 0x11c
+; GFX9-NEXT:    v_pk_mul_lo_u16 v0, v0, s4 op_sel_hi:[1,0]
+; GFX9-NEXT:    s_movk_i32 s4, 0x52
+; GFX9-NEXT:    v_pk_add_u16 v0, v0, s4 op_sel_hi:[1,0]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_284_add_82_v2i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_pk_mul_lo_u16 v0, 0x11c, v0 op_sel_hi:[0,1]
+; GFX10-NEXT:    v_pk_add_u16 v0, 0x52, v0 op_sel_hi:[0,1]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul <2 x i16> %arg, <i16 284, i16 284>
+  %add = add <2 x i16> %mul, <i16 82, i16 82>
+  ret <2 x i16> %add
+}
+
+define i64 @v_mul_5_add_1_i64(i64 %arg) {
+; GFX6-LABEL: v_mul_5_add_1_i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_mul_lo_u32 v1, v1, 5
+; GFX6-NEXT:    v_mul_hi_u32 v2, v0, 5
+; GFX6-NEXT:    v_mul_lo_u32 v0, v0, 5
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
+; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_mul_5_add_1_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_mul_lo_u32 v2, v1, 5
+; GFX7-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, 5, 1
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_5_add_1_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mul_lo_u32 v2, v1, 5
+; GFX8-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, 5, 1
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX900-LABEL: v_mul_5_add_1_i64:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX900-NEXT:    v_mov_b32_e32 v2, v1
+; GFX900-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, 5, 1
+; GFX900-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], v2, 5, v[1:2]
+; GFX900-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: v_mul_5_add_1_i64:
+; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT:    v_mov_b32_e32 v2, v1
+; GFX90A-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, 5, 1
+; GFX90A-NEXT:    v_mov_b32_e32 v4, v1
+; GFX90A-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v2, 5, v[4:5]
+; GFX90A-NEXT:    v_mov_b32_e32 v1, v2
+; GFX90A-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_5_add_1_i64:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_mov_b32_e32 v2, v1
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], null, v0, 5, 1
+; GFX10-NEXT:    v_mad_u64_u32 v[1:2], null, v2, 5, v[1:2]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i64 %arg, 5
+  %add = add i64 %mul, 1
+  ret i64 %add
+}
+
+define i64 @v_mul_284_add_82_i64(i64 %arg) {
+; GFX6-LABEL: v_mul_284_add_82_i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    s_movk_i32 s4, 0x11c
+; GFX6-NEXT:    v_mul_lo_u32 v1, v1, s4
+; GFX6-NEXT:    v_mul_hi_u32 v2, v0, s4
+; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s4
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, 0x52, v0
+; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_mul_284_add_82_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v2, v1
+; GFX7-NEXT:    v_mov_b32_e32 v3, 0x52
+; GFX7-NEXT:    v_mov_b32_e32 v4, 0
+; GFX7-NEXT:    s_movk_i32 s6, 0x11c
+; GFX7-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, s6, v[3:4]
+; GFX7-NEXT:    v_mul_lo_u32 v2, v2, s6
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_284_add_82_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v2, v1
+; GFX8-NEXT:    v_mov_b32_e32 v3, 0x52
+; GFX8-NEXT:    v_mov_b32_e32 v4, 0
+; GFX8-NEXT:    s_movk_i32 s6, 0x11c
+; GFX8-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, s6, v[3:4]
+; GFX8-NEXT:    v_mul_lo_u32 v2, v2, s6
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX900-LABEL: v_mul_284_add_82_i64:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX900-NEXT:    v_mov_b32_e32 v3, 0x52
+; GFX900-NEXT:    v_mov_b32_e32 v4, 0
+; GFX900-NEXT:    s_movk_i32 s6, 0x11c
+; GFX900-NEXT:    v_mov_b32_e32 v2, v1
+; GFX900-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, s6, v[3:4]
+; GFX900-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], v2, s6, v[1:2]
+; GFX900-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: v_mul_284_add_82_i64:
+; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT:    v_mov_b32_e32 v4, 0x52
+; GFX90A-NEXT:    v_mov_b32_e32 v5, 0
+; GFX90A-NEXT:    s_movk_i32 s6, 0x11c
+; GFX90A-NEXT:    v_mov_b32_e32 v2, v1
+; GFX90A-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, s6, v[4:5]
+; GFX90A-NEXT:    v_mov_b32_e32 v4, v1
+; GFX90A-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v2, s6, v[4:5]
+; GFX90A-NEXT:    v_mov_b32_e32 v1, v2
+; GFX90A-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_284_add_82_i64:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    s_mov_b64 s[4:5], 0x52
+; GFX10-NEXT:    v_mov_b32_e32 v2, v1
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], null, 0x11c, v0, s[4:5]
+; GFX10-NEXT:    v_mad_u64_u32 v[1:2], null, 0x11c, v2, v[1:2]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i64 %arg, 284
+  %add = add i64 %mul, 82
+  ret i64 %add
+}
+
+define i64 @v_mul_934584645_add_8234599_i64(i64 %arg) {
+; GFX6-LABEL: v_mul_934584645_add_8234599_i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b32 s4, 0x37b4a145
+; GFX6-NEXT:    v_mul_lo_u32 v1, v1, s4
+; GFX6-NEXT:    v_mul_hi_u32 v2, v0, s4
+; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s4
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, 0x7da667, v0
+; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_mul_934584645_add_8234599_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v2, v1
+; GFX7-NEXT:    v_mov_b32_e32 v3, 0x7da667
+; GFX7-NEXT:    v_mov_b32_e32 v4, 0
+; GFX7-NEXT:    s_mov_b32 s6, 0x37b4a145
+; GFX7-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, s6, v[3:4]
+; GFX7-NEXT:    v_mul_lo_u32 v2, v2, s6
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_934584645_add_8234599_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v2, v1
+; GFX8-NEXT:    v_mov_b32_e32 v3, 0x7da667
+; GFX8-NEXT:    v_mov_b32_e32 v4, 0
+; GFX8-NEXT:    s_mov_b32 s6, 0x37b4a145
+; GFX8-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, s6, v[3:4]
+; GFX8-NEXT:    v_mul_lo_u32 v2, v2, s6
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v2, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX900-LABEL: v_mul_934584645_add_8234599_i64:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX900-NEXT:    v_mov_b32_e32 v3, 0x7da667
+; GFX900-NEXT:    v_mov_b32_e32 v4, 0
+; GFX900-NEXT:    s_mov_b32 s6, 0x37b4a145
+; GFX900-NEXT:    v_mov_b32_e32 v2, v1
+; GFX900-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, s6, v[3:4]
+; GFX900-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], v2, s6, v[1:2]
+; GFX900-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX90A-LABEL: v_mul_934584645_add_8234599_i64:
+; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX90A-NEXT:    v_mov_b32_e32 v4, 0x7da667
+; GFX90A-NEXT:    v_mov_b32_e32 v5, 0
+; GFX90A-NEXT:    s_mov_b32 s6, 0x37b4a145
+; GFX90A-NEXT:    v_mov_b32_e32 v2, v1
+; GFX90A-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, s6, v[4:5]
+; GFX90A-NEXT:    v_mov_b32_e32 v4, v1
+; GFX90A-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v2, s6, v[4:5]
+; GFX90A-NEXT:    v_mov_b32_e32 v1, v2
+; GFX90A-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_934584645_add_8234599_i64:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    s_mov_b64 s[4:5], 0x7da667
+; GFX10-NEXT:    v_mov_b32_e32 v2, v1
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], null, 0x37b4a145, v0, s[4:5]
+; GFX10-NEXT:    v_mad_u64_u32 v[1:2], null, 0x37b4a145, v2, v[1:2]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %mul = mul i64 %arg, 934584645
+  %add = add i64 %mul, 8234599
+  ret i64 %add
+}
+
+define amdgpu_kernel void @compute_mad(ptr addrspace(4) %i18, ptr addrspace(4) %i21, ptr addrspace(1) nocapture noundef writeonly align 4 %arg, i32 noundef %arg1) #1 {
+; GFX67-LABEL: compute_mad:
+; GFX67:       ; %bb.0: ; %bb
+; GFX67-NEXT:    s_load_dword s3, s[0:1], 0x6
+; GFX67-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; GFX67-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX67-NEXT:    s_add_i32 s3, s3, 1
+; GFX67-NEXT:    v_mul_lo_u32 v1, s3, v0
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, s3, v1
+; GFX67-NEXT:    v_mul_lo_u32 v2, v2, v0
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x4
+; GFX67-NEXT:    s_load_dword s3, s[6:7], 0x1
+; GFX67-NEXT:    v_mul_lo_u32 v3, v2, v1
+; GFX67-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GFX67-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX67-NEXT:    s_and_b32 s3, s3, 0xffff
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
+; GFX67-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, 1, v3
+; GFX67-NEXT:    s_mul_i32 s2, s2, s3
+; GFX67-NEXT:    v_mul_lo_u32 v3, v1, v2
+; GFX67-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
+; GFX67-NEXT:    v_mov_b32_e32 v4, s5
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GFX67-NEXT:    v_mul_lo_u32 v1, v2, v1
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, 1, v3
+; GFX67-NEXT:    s_mov_b32 s3, 0xf000
+; GFX67-NEXT:    v_mul_lo_u32 v2, v1, v2
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_add_i32_e32 v0, vcc, s4, v0
+; GFX67-NEXT:    v_mul_lo_u32 v2, v2, v1
+; GFX67-NEXT:    v_addc_u32_e32 v1, vcc, 0, v4, vcc
+; GFX67-NEXT:    v_lshl_b64 v[0:1], v[0:1], 2
+; GFX67-NEXT:    s_mov_b32 s2, 0
+; GFX67-NEXT:    buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; GFX67-NEXT:    s_endpgm
+;
+; GFX8-LABEL: compute_mad:
+; GFX8:       ; %bb.0: ; %bb
+; GFX8-NEXT:    s_load_dword s3, s[0:1], 0x18
+; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_add_i32 s3, s3, 1
+; GFX8-NEXT:    v_mul_lo_u32 v1, s3, v0
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, s3, v1
+; GFX8-NEXT:    v_mul_lo_u32 v2, v2, v0
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 1, v1
+; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x10
+; GFX8-NEXT:    s_load_dword s3, s[6:7], 0x4
+; GFX8-NEXT:    v_mul_lo_u32 v3, v2, v1
+; GFX8-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_and_b32 s3, s3, 0xffff
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v3, v1
+; GFX8-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 1, v3
+; GFX8-NEXT:    s_mul_i32 s2, s2, s3
+; GFX8-NEXT:    v_mul_lo_u32 v3, v1, v2
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
+; GFX8-NEXT:    v_mov_b32_e32 v4, s5
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v3, v2
+; GFX8-NEXT:    v_mul_lo_u32 v1, v2, v1
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 1, v3
+; GFX8-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8-NEXT:    v_mul_lo_u32 v2, v1, v2
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 1, v1
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s4, v0
+; GFX8-NEXT:    v_mul_lo_u32 v2, v2, v1
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v4, vcc
+; GFX8-NEXT:    v_lshlrev_b64 v[0:1], 2, v[0:1]
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v3, v1, vcc
+; GFX8-NEXT:    flat_store_dword v[0:1], v2
+; GFX8-NEXT:    s_endpgm
+;
+; GFX900-LABEL: compute_mad:
+; GFX900:       ; %bb.0: ; %bb
+; GFX900-NEXT:    s_load_dword s3, s[0:1], 0x18
+; GFX900-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; GFX900-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x10
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    s_add_i32 s3, s3, 1
+; GFX900-NEXT:    v_mul_lo_u32 v1, s3, v0
+; GFX900-NEXT:    v_mov_b32_e32 v4, s9
+; GFX900-NEXT:    v_add_u32_e32 v2, s3, v1
+; GFX900-NEXT:    v_mul_lo_u32 v2, v2, v0
+; GFX900-NEXT:    v_add_u32_e32 v1, 1, v1
+; GFX900-NEXT:    s_load_dword s3, s[6:7], 0x4
+; GFX900-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX900-NEXT:    v_mul_lo_u32 v3, v2, v1
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    s_and_b32 s3, s3, 0xffff
+; GFX900-NEXT:    s_mul_i32 s2, s2, s3
+; GFX900-NEXT:    v_add_u32_e32 v1, v3, v1
+; GFX900-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GFX900-NEXT:    v_add_u32_e32 v2, 1, v3
+; GFX900-NEXT:    v_add_u32_e32 v0, s2, v0
+; GFX900-NEXT:    v_mov_b32_e32 v5, s1
+; GFX900-NEXT:    v_mul_lo_u32 v3, v1, v2
+; GFX900-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
+; GFX900-NEXT:    v_add_u32_e32 v2, v3, v2
+; GFX900-NEXT:    v_mul_lo_u32 v2, v2, v1
+; GFX900-NEXT:    v_add_u32_e32 v1, 1, v3
+; GFX900-NEXT:    v_mul_lo_u32 v3, v2, v1
+; GFX900-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v5, vcc
+; GFX900-NEXT:    v_add_u32_e32 v2, 1, v2
+; GFX900-NEXT:    v_lshlrev_b64 v[0:1], 2, v[0:1]
+; GFX900-NEXT:    v_mul_lo_u32 v2, v3, v2
+; GFX900-NEXT:    v_add_co_u32_e32 v0, vcc, s8, v0
+; GFX900-NEXT:    v_addc_co_u32_e32 v1, vcc, v4, v1, vcc
+; GFX900-NEXT:    global_store_dword v[0:1], v2, off
+; GFX900-NEXT:    s_endpgm
+;
+; GFX90A-LABEL: compute_mad:
+; GFX90A:       ; %bb.0: ; %bb
+; GFX90A-NEXT:    s_load_dword s3, s[0:1], 0x18
+; GFX90A-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; GFX90A-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x10
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:    s_add_i32 s3, s3, 1
+; GFX90A-NEXT:    v_mul_lo_u32 v1, s3, v0
+; GFX90A-NEXT:    v_add_u32_e32 v2, s3, v1
+; GFX90A-NEXT:    v_mul_lo_u32 v2, v2, v0
+; GFX90A-NEXT:    v_add_u32_e32 v1, 1, v1
+; GFX90A-NEXT:    s_load_dword s3, s[6:7], 0x4
+; GFX90A-NEXT:    v_mul_lo_u32 v3, v2, v1
+; GFX90A-NEXT:    v_add_u32_e32 v1, v3, v1
+; GFX90A-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GFX90A-NEXT:    v_add_u32_e32 v2, 1, v3
+; GFX90A-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90A-NEXT:    v_mul_lo_u32 v3, v1, v2
+; GFX90A-NEXT:    v_add_u32_e32 v2, v3, v2
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:    s_and_b32 s3, s3, 0xffff
+; GFX90A-NEXT:    v_mul_lo_u32 v1, v2, v1
+; GFX90A-NEXT:    v_add_u32_e32 v2, 1, v3
+; GFX90A-NEXT:    s_mul_i32 s2, s2, s3
+; GFX90A-NEXT:    v_add_u32_e32 v3, 1, v1
+; GFX90A-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GFX90A-NEXT:    v_add_u32_e32 v0, s2, v0
+; GFX90A-NEXT:    v_mul_lo_u32 v2, v1, v3
+; GFX90A-NEXT:    v_mov_b32_e32 v1, s1
+; GFX90A-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
+; GFX90A-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX90A-NEXT:    v_lshlrev_b64 v[0:1], 2, v[0:1]
+; GFX90A-NEXT:    v_mov_b32_e32 v3, s9
+; GFX90A-NEXT:    v_add_co_u32_e32 v0, vcc, s8, v0
+; GFX90A-NEXT:    v_addc_co_u32_e32 v1, vcc, v3, v1, vcc
+; GFX90A-NEXT:    global_store_dword v[0:1], v2, off
+; GFX90A-NEXT:    s_endpgm
+;
+; GFX10-LABEL: compute_mad:
+; GFX10:       ; %bb.0: ; %bb
+; GFX10-NEXT:    s_clause 0x2
+; GFX10-NEXT:    s_load_dword s3, s[0:1], 0x18
+; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; GFX10-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x10
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    s_add_i32 s3, s3, 1
+; GFX10-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GFX10-NEXT:    v_mul_lo_u32 v1, s3, v0
+; GFX10-NEXT:    v_add_nc_u32_e32 v2, s3, v1
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, 1, v1
+; GFX10-NEXT:    s_load_dword s3, s[6:7], 0x4
+; GFX10-NEXT:    v_mul_lo_u32 v2, v2, v0
+; GFX10-NEXT:    v_mul_lo_u32 v3, v2, v1
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, v3, v1
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    s_and_b32 s3, s3, 0xffff
+; GFX10-NEXT:    v_mul_lo_u32 v2, v1, v2
+; GFX10-NEXT:    v_add_nc_u32_e32 v1, 1, v3
+; GFX10-NEXT:    v_mul_lo_u32 v3, v2, v1
+; GFX10-NEXT:    v_add_nc_u32_e32 v4, v3, v1
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], null, s2, s3, v[0:1]
+; GFX10-NEXT:    v_add_nc_u32_e32 v3, 1, v3
+; GFX10-NEXT:    v_mul_lo_u32 v2, v4, v2
+; GFX10-NEXT:    v_add_co_u32 v0, s2, s4, v0
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v1, null, s5, 0, s2
+; GFX10-NEXT:    v_mul_lo_u32 v3, v2, v3
+; GFX10-NEXT:    v_add_nc_u32_e32 v2, 1, v2
+; GFX10-NEXT:    v_lshlrev_b64 v[0:1], 2, v[0:1]
+; GFX10-NEXT:    v_mul_lo_u32 v2, v3, v2
+; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, s0, v0
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
+; GFX10-NEXT:    global_store_dword v[0:1], v2, off
+; GFX10-NEXT:    s_endpgm
+bb:
+  %i = tail call i32 @llvm.amdgcn.workitem.id.x(), !range !0
+  %i2 = add i32 %arg1, 1
+  %i3 = mul i32 %i2, %i
+  %i4 = add i32 %i3, %i2
+  %i5 = mul i32 %i4, %i
+  %i6 = add i32 %i3, 1
+  %i7 = mul i32 %i5, %i6
+  %i8 = add i32 %i7, %i6
+  %i9 = mul i32 %i8, %i5
+  %i10 = add i32 %i7, 1
+  %i11 = mul i32 %i9, %i10
+  %i12 = add i32 %i11, %i10
+  %i13 = mul i32 %i12, %i9
+  %i14 = add i32 %i11, 1
+  %i15 = add i32 %i13, 1
+  %i16 = mul i32 %i13, %i14
+  %i17 = mul i32 %i16, %i15
+  %i19 = load i64, ptr addrspace(4) %i18, align 8
+  %i20 = tail call i32 @llvm.amdgcn.workgroup.id.x()
+  %i22 = getelementptr i8, ptr addrspace(4) %i21, i64 4
+  %i23 = load i16, ptr addrspace(4) %i22, align 4
+  %i24 = zext i16 %i23 to i32
+  %i25 = mul i32 %i20, %i24
+  %i26 = add i32 %i25, %i
+  %i27 = zext i32 %i26 to i64
+  %i28 = add i64 %i19, %i27
+  %i29 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %i28
+  store i32 %i17, ptr addrspace(1) %i29, align 4
+  ret void
+}
+
+define amdgpu_ps i32 @s_mul_add_1_i32(i32 inreg %x, i32 inreg %y) {
+; GFX67-LABEL: s_mul_add_1_i32:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_add_i32 s1, s1, 1
+; GFX67-NEXT:    s_mul_i32 s0, s0, s1
+; GFX67-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_mul_add_1_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_add_i32 s1, s1, 1
+; GFX8-NEXT:    s_mul_i32 s0, s0, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_mul_add_1_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_add_i32 s1, s1, 1
+; GFX9-NEXT:    s_mul_i32 s0, s0, s1
+; GFX9-NEXT:    ; return to shader part epilog
+;
+; GFX10-LABEL: s_mul_add_1_i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_add_i32 s1, s1, 1
+; GFX10-NEXT:    s_mul_i32 s0, s0, s1
+; GFX10-NEXT:    ; return to shader part epilog
+  %add = add i32 %y, 1
+  %mul = mul i32 %x, %add
+  ret i32 %mul
+}
+
+define amdgpu_ps i32 @s_mul_add_1_i32_commute(i32 inreg %x, i32 inreg %y) {
+; GFX67-LABEL: s_mul_add_1_i32_commute:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_add_i32 s1, s1, 1
+; GFX67-NEXT:    s_mul_i32 s0, s1, s0
+; GFX67-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_mul_add_1_i32_commute:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_add_i32 s1, s1, 1
+; GFX8-NEXT:    s_mul_i32 s0, s1, s0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_mul_add_1_i32_commute:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_add_i32 s1, s1, 1
+; GFX9-NEXT:    s_mul_i32 s0, s1, s0
+; GFX9-NEXT:    ; return to shader part epilog
+;
+; GFX10-LABEL: s_mul_add_1_i32_commute:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_add_i32 s1, s1, 1
+; GFX10-NEXT:    s_mul_i32 s0, s1, s0
+; GFX10-NEXT:    ; return to shader part epilog
+  %add = add i32 %y, 1
+  %mul = mul i32 %add, %x
+  ret i32 %mul
+}
+
+define i8 @v_mul_add_1_i8(i8 %x, i8 %y) {
+; GFX67-LABEL: v_mul_add_1_i8:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v1, 1, v1
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v1, 1, v1
+; GFX9-NEXT:    v_mul_lo_u16_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i8:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u16 v1, v1, 1
+; GFX10-NEXT:    v_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i8 %y, 1
+  %mul = mul i8 %x, %add
+  ret i8 %mul
+}
+
+define i8 @v_mul_add_1_i8_commute(i8 %x, i8 %y) {
+; GFX67-LABEL: v_mul_add_1_i8_commute:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v1, v0
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i8_commute:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v1, 1, v1
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i8_commute:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v1, 1, v1
+; GFX9-NEXT:    v_mul_lo_u16_e32 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i8_commute:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u16 v1, v1, 1
+; GFX10-NEXT:    v_mul_lo_u16 v0, v1, v0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i8 %y, 1
+  %mul = mul i8 %add, %x
+  ret i8 %mul
+}
+
+define i8 @v_mul_add_1_i8_zext(i8 zeroext %x, i8 zeroext %y) {
+; GFX67-LABEL: v_mul_add_1_i8_zext:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i8_zext:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v1, 1, v1
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i8_zext:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v1, 1, v1
+; GFX9-NEXT:    v_mul_lo_u16_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i8_zext:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u16 v1, v1, 1
+; GFX10-NEXT:    v_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i8 %y, 1
+  %mul = mul i8 %x, %add
+  ret i8 %mul
+}
+
+define i8 @v_mul_add_1_i8_zext_commute(i8 zeroext %x, i8 zeroext %y) {
+; GFX67-LABEL: v_mul_add_1_i8_zext_commute:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v1, vcc, 1, v1
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v1, v0
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_i8_zext_commute:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v1, 1, v1
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_i8_zext_commute:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v1, 1, v1
+; GFX9-NEXT:    v_mul_lo_u16_e32 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_i8_zext_commute:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u16 v1, v1, 1
+; GFX10-NEXT:    v_mul_lo_u16 v0, v1, v0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i8 %y, 1
+  %mul = mul i8 %add, %x
+  ret i8 %mul
+}
+
+define <2 x i8> @v_mul_add_1_v2i8(<2 x i8> %x, <2 x i8> %y) {
+; GFX67-LABEL: v_mul_add_1_v2i8:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, 1, v2
+; GFX67-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
+; GFX67-NEXT:    v_and_b32_e32 v2, 0xff, v2
+; GFX67-NEXT:    v_or_b32_e32 v2, v3, v2
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, 0x100, v2
+; GFX67-NEXT:    v_bfe_u32 v3, v2, 8, 8
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; GFX67-NEXT:    v_and_b32_e32 v2, 0xff, v2
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v2
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, v1, v3
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_v2i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v3, 1, v3
+; GFX8-NEXT:    v_add_u16_e32 v2, 1, v2
+; GFX8-NEXT:    v_mul_lo_u16_e32 v1, v1, v3
+; GFX8-NEXT:    v_lshlrev_b16_e32 v3, 8, v1
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v0, v2
+; GFX8-NEXT:    v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_v2i8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v3, 1, v3
+; GFX9-NEXT:    v_add_u16_e32 v2, 1, v2
+; GFX9-NEXT:    v_mul_lo_u16_e32 v1, v1, v3
+; GFX9-NEXT:    v_lshlrev_b16_e32 v3, 8, v1
+; GFX9-NEXT:    v_mul_lo_u16_e32 v0, v0, v2
+; GFX9-NEXT:    v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_v2i8:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u16 v3, v3, 1
+; GFX10-NEXT:    v_add_nc_u16 v2, v2, 1
+; GFX10-NEXT:    v_mul_lo_u16 v1, v1, v3
+; GFX10-NEXT:    v_mul_lo_u16 v0, v0, v2
+; GFX10-NEXT:    v_lshlrev_b16 v2, 8, v1
+; GFX10-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX10-NEXT:    v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add <2 x i8> %y, <i8 1, i8 1>
+  %mul = mul <2 x i8> %x, %add
+  ret <2 x i8> %mul
+}
+
+define <2 x i8> @v_mul_add_1_v2i8_commute(<2 x i8> %x, <2 x i8> %y) {
+; GFX67-LABEL: v_mul_add_1_v2i8_commute:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, 1, v2
+; GFX67-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
+; GFX67-NEXT:    v_and_b32_e32 v2, 0xff, v2
+; GFX67-NEXT:    v_or_b32_e32 v2, v3, v2
+; GFX67-NEXT:    v_add_i32_e32 v2, vcc, 0x100, v2
+; GFX67-NEXT:    v_bfe_u32 v3, v2, 8, 8
+; GFX67-NEXT:    v_and_b32_e32 v2, 0xff, v2
+; GFX67-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; GFX67-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v2, v0
+; GFX67-NEXT:    v_mul_u32_u24_e32 v1, v3, v1
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_mul_add_1_v2i8_commute:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v3, 1, v3
+; GFX8-NEXT:    v_add_u16_e32 v2, 1, v2
+; GFX8-NEXT:    v_mul_lo_u16_e32 v1, v3, v1
+; GFX8-NEXT:    v_lshlrev_b16_e32 v3, 8, v1
+; GFX8-NEXT:    v_mul_lo_u16_e32 v0, v2, v0
+; GFX8-NEXT:    v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_mul_add_1_v2i8_commute:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v3, 1, v3
+; GFX9-NEXT:    v_add_u16_e32 v2, 1, v2
+; GFX9-NEXT:    v_mul_lo_u16_e32 v1, v3, v1
+; GFX9-NEXT:    v_lshlrev_b16_e32 v3, 8, v1
+; GFX9-NEXT:    v_mul_lo_u16_e32 v0, v2, v0
+; GFX9-NEXT:    v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_mul_add_1_v2i8_commute:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    v_add_nc_u16 v3, v3, 1
+; GFX10-NEXT:    v_add_nc_u16 v2, v2, 1
+; GFX10-NEXT:    v_mul_lo_u16 v1, v3, v1
+; GFX10-NEXT:    v_mul_lo_u16 v0, v2, v0
+; GFX10-NEXT:    v_lshlrev_b16 v2, 8, v1
+; GFX10-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX10-NEXT:    v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+  %add = add <2 x i8> %y, <i8 1, i8 1>
+  %mul = mul <2 x i8> %add, %x
+  ret <2 x i8> %mul
+}
+
+declare align 4 ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() #2
+declare i32 @llvm.amdgcn.workitem.id.x() #2
+declare align 4 ptr addrspace(4) @llvm.amdgcn.dispatch.ptr() #2
+declare i32 @llvm.amdgcn.workgroup.id.x() #2
+
+attributes #0 = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) }
+attributes #1 = { mustprogress nofree nosync nounwind willreturn memory(read, argmem: readwrite, inaccessiblemem: none) }
+attributes #2 = { mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+
+!0 = !{i32 0, i32 1024}