[llvm] [RISCV][VLOPT] Add support for more instructions in vl-opt-op-info.mir (PR #119416)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 10 12:51:44 PST 2024
================
@@ -483,3 +513,195 @@ body: |
%x:vr = PseudoVADD_VV_MF4 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVNSRL_WV_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
...
+---
+name: vmerge_vim
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmerge_vim
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VIM_M1 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vrnov0 = PseudoVMERGE_VIM_M1 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+...
+---
+name: vmerge_vim_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmerge_vim_incompatible_eew
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VIM_M1 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
+ %y:vrnov0 = PseudoVMERGE_VIM_M1 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+...
+---
+name: vmerge_vim_incompatible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmerge_vim_incompatible_emul
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VIM_MF2 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vrnov0 = PseudoVMERGE_VIM_MF2 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+...
+---
+name: vmerge_vxm
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmerge_vxm
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: %z:vrnov0 = PseudoVMERGE_VXM_M1 $noreg, %x, %y, $v0, 1, 3 /* e8 */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:gpr = ADDI $x0, 1
+ %z:vrnov0 = PseudoVMERGE_VXM_M1 $noreg, %x, %y, $v0, 1, 3 /* e8 */
+...
+---
+name: vmerge_vxm_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmerge_vxm_incompatible_eew
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: %z:vrnov0 = PseudoVMERGE_VXM_M1 $noreg, %x, %y, $v0, 1, 3 /* e8 */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
+ %y:gpr = ADDI $x0, 1
+ %z:vrnov0 = PseudoVMERGE_VXM_M1 $noreg, %x, %y, $v0, 1, 3 /* e8 */
+...
+---
+name: vmerge_vxm_incompatible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmerge_vxm_incompatible_emul
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: %z:vrnov0 = PseudoVMERGE_VXM_MF2 $noreg, %x, %y, $v0, 1, 3 /* e8 */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:gpr = ADDI $x0, 1
+ %z:vrnov0 = PseudoVMERGE_VXM_MF2 $noreg, %x, %y, $v0, 1, 3 /* e8 */
+...
+---
+name: vmerge_vvm
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmerge_vvm
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+...
+---
+name: vmerge_vvm_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmerge_vvm_incompatible_eew
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
+ %y:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+...
+---
+name: vmerge_vvm_incompatible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmerge_vvm_incompatible_emul
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VVM_MF2 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vrnov0 = PseudoVMERGE_VVM_MF2 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+...
+---
+name: vmv_v_i
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmv_v_i
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVMV_V_I_M1 %x, 9, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVMV_V_I_M1 %x, 9, 1, 3 /* e8 */, 0
+...
+---
+name: vmv_v_i_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmv_v_i_incompatible_eew
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVMV_V_I_M1 %x, 9, 1, 4 /* e16 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
+ %y:vr = PseudoVMV_V_I_M1 %x, 9, 1, 4 /* e16 */, 0
+...
+---
+name: vmv_v_i_incompatible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmv_v_i_incompatible_emul
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVMV_V_I_MF2 %x, 9, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVMV_V_I_MF2 %x, 9, 1, 3 /* e8 */, 0
+...
+---
+name: vmv_v_x
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmv_v_x
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: %z:vr = PseudoVMV_V_X_M1 %x, %y, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:gpr = ADDI $x0, 1
+ %z:vr = PseudoVMV_V_X_M1 %x, %y, 1, 3 /* e8 */, 0
+...
+---
+name: vmv_v_x_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmv_v_x_incompatible_eew
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: %z:vr = PseudoVMV_V_X_M1 %x, %y, 1, 4 /* e16 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
+ %y:gpr = ADDI $x0, 1
+ %z:vr = PseudoVMV_V_X_M1 %x, %y, 1, 4 /* e16 */, 0
+...
+---
+name: vmv_v_x_incompatible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmv_v_x_incompatible_emul
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: %z:vr = PseudoVMV_V_X_MF2 %x, %y, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:gpr = ADDI $x0, 1
+ %z:vr = PseudoVMV_V_X_MF2 %x, %y, 1, 3 /* e8 */, 0
+...
+---
+name: vmv_v_v
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmv_v_v
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+  ; CHECK-NEXT: %y:vr = PseudoVMV_V_V_M1 $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+  %y:vr = PseudoVMV_V_V_M1 $noreg, %x, 1, 3 /* e8 */, 0
+...
+---
+name: vmv_v_v_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmv_v_v_incompatible_eew
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVMV_V_V_M1 %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
+ %y:vr = PseudoVMV_V_V_M1 %x, $noreg, 1, 4 /* e16 */, 0
----------------
topperc wrote:
This is testing the passthru operand, which can never fold regardless of the EEW being incompatible.
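
For comparison, a minimal sketch (hypothetical test name, not part of the posted patch) of a test that routes %x through the source operand instead, so the e16/e8 EEW mismatch is actually what blocks the VL reduction:

---
name: vmv_v_v_source_incompatible_eew
body: |
  bb.0:
    ; %x feeds the source operand of the vmv.v.v rather than the passthru,
    ; so the e16 producer vs. e8 user is what should keep the VADD's VL at -1.
    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
    %y:vr = PseudoVMV_V_V_M1 $noreg, %x, 1, 3 /* e8 */, 0
...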
https://github.com/llvm/llvm-project/pull/119416