[llvm] 904849f - [RISCV][VLOPT] Add support for more instructions in vl-opt-op-info.mir (#119416)

via llvm-commits <llvm-commits at lists.llvm.org>
Tue Dec 17 09:57:33 PST 2024


Author: Michael Maitland
Date: 2024-12-17T12:57:29-05:00
New Revision: 904849f2973eb8ab517f8a805cf8a747924220ef

URL: https://github.com/llvm/llvm-project/commit/904849f2973eb8ab517f8a805cf8a747924220ef
DIFF: https://github.com/llvm/llvm-project/commit/904849f2973eb8ab517f8a805cf8a747924220ef.diff

LOG: [RISCV][VLOPT] Add support for more instructions in vl-opt-op-info.mir (#119416)

Specifically, this adds coverage for some more instructions where EMUL=LMUL and EEW=SEW.
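
For reference, the core pattern exercised by the new tests (taken from the
vop_vi test added to vl-opt-op-info.mir below) is a producer whose VL can be
reduced because its only user has the same EMUL and EEW:

    %x:vr = PseudoVADD_VI_M1 $noreg, $noreg, 9, -1, 3 /* e8 */, 0
    %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0

After riscv-vl-optimizer, %x's VL of -1 (VLMAX) shrinks to the user's VL of 1,
while the *_incompatible_eew and *_incompatible_emul variants check that no
reduction happens when the EEW or EMUL of the two instructions differ.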

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
    llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index 35274c25da856a..faa41ec61cd127 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -2594,71 +2594,6 @@ define <vscale x 4 x i32> @vwsll_vi(<vscale x 4 x i16> %a, <vscale x 4 x i32> %b
   ret <vscale x 4 x i32> %2
 }
 
-; Test getOperandInfo
-
-define <vscale x 1 x i8> @vmerge_vim(<vscale x 1 x i8> %a, i8 %b, <vscale x 1 x i1> %m, iXLen %vl) {
-; NOVLOPT-LABEL: vmerge_vim:
-; NOVLOPT:       # %bb.0:
-; NOVLOPT-NEXT:    vsetvli a2, zero, e8, mf8, tu, ma
-; NOVLOPT-NEXT:    vmv.v.x v8, a0
-; NOVLOPT-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; NOVLOPT-NEXT:    vmerge.vim v8, v8, 2, v0
-; NOVLOPT-NEXT:    ret
-;
-; VLOPT-LABEL: vmerge_vim:
-; VLOPT:       # %bb.0:
-; VLOPT-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
-; VLOPT-NEXT:    vmv.v.x v8, a0
-; VLOPT-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
-; VLOPT-NEXT:    vmerge.vim v8, v8, 2, v0
-; VLOPT-NEXT:    ret
-  %2 = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(<vscale x 1 x i8> %a, i8 %b, iXLen -1)
-  %3 = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %2, i8 2, <vscale x 1 x i1> %m, iXLen %vl)
-  ret <vscale x 1 x i8> %3
-}
-
-define <vscale x 1 x i8> @vmerge_vxm(<vscale x 1 x i8> %a, i8 %b, <vscale x 1 x i1> %m, iXLen %vl) {
-; NOVLOPT-LABEL: vmerge_vxm:
-; NOVLOPT:       # %bb.0:
-; NOVLOPT-NEXT:    vsetvli a2, zero, e8, mf8, tu, ma
-; NOVLOPT-NEXT:    vmv.v.x v8, a0
-; NOVLOPT-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; NOVLOPT-NEXT:    vmerge.vxm v8, v8, a0, v0
-; NOVLOPT-NEXT:    ret
-;
-; VLOPT-LABEL: vmerge_vxm:
-; VLOPT:       # %bb.0:
-; VLOPT-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
-; VLOPT-NEXT:    vmv.v.x v8, a0
-; VLOPT-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
-; VLOPT-NEXT:    vmerge.vxm v8, v8, a0, v0
-; VLOPT-NEXT:    ret
-  %2 = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(<vscale x 1 x i8> %a, i8 %b, iXLen -1)
-  %3 = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %2, i8 %b, <vscale x 1 x i1> %m, iXLen %vl)
-  ret <vscale x 1 x i8> %3
-}
-
-define <vscale x 1 x i8> @vmerge_vvm(<vscale x 1 x i8> %a, i8 %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, iXLen %vl) {
-; NOVLOPT-LABEL: vmerge_vvm:
-; NOVLOPT:       # %bb.0:
-; NOVLOPT-NEXT:    vsetvli a2, zero, e8, mf8, tu, ma
-; NOVLOPT-NEXT:    vmv.v.x v8, a0
-; NOVLOPT-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; NOVLOPT-NEXT:    vmerge.vvm v8, v8, v9, v0
-; NOVLOPT-NEXT:    ret
-;
-; VLOPT-LABEL: vmerge_vvm:
-; VLOPT:       # %bb.0:
-; VLOPT-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
-; VLOPT-NEXT:    vmv.v.x v8, a0
-; VLOPT-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
-; VLOPT-NEXT:    vmerge.vvm v8, v8, v9, v0
-; VLOPT-NEXT:    ret
-  %2 = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(<vscale x 1 x i8> %a, i8 %b, iXLen -1)
-  %3 = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %2, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, iXLen %vl)
-  ret <vscale x 1 x i8> %3
-}
-
 define <vscale x 1 x i32> @vmand_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
 ; NOVLOPT-LABEL: vmand_mm:
 ; NOVLOPT:       # %bb.0:
@@ -2950,4 +2885,3 @@ define <vscale x 1 x i32> @vmsof_m(<vscale x 1 x i1> %a, <vscale x 1 x i32> %c,
   %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
   ret <vscale x 1 x i32> %3
 }
-

diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index 5f23823d101031..8587ec136afd83 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -1,6 +1,36 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
 # RUN: llc %s -o - -mtriple=riscv64 -mattr=+v -run-pass=riscv-vl-optimizer -verify-machineinstrs | FileCheck %s
 
+---
+name: vop_vi
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vop_vi
+    ; CHECK: %x:vr = PseudoVADD_VI_M1 $noreg, $noreg, 9, 1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+    %x:vr = PseudoVADD_VI_M1 $noreg, $noreg, 9, -1, 3 /* e8 */, 0
+    %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+...
+---
+name: vop_vi_incompatible_eew
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vop_vi_incompatible_eew
+    ; CHECK: %x:vr = PseudoVADD_VI_M1 $noreg, $noreg, 9, -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+    %x:vr = PseudoVADD_VI_M1 $noreg, $noreg, 9, -1, 3 /* e8 */, 0
+    %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+...
+---
+name: vop_vi_incompatible_emul
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vop_vi_incompatible_emul
+    ; CHECK: %x:vr = PseudoVADD_VI_M1 $noreg, $noreg, 9, -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+    %x:vr = PseudoVADD_VI_M1 $noreg, $noreg, 9, -1, 3 /* e8 */, 0
+    %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+...
 ---
 name: vop_vv
 body: |
@@ -682,3 +712,183 @@ body: |
     %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
     %y:vr = PseudoVMSEQ_VV_MF2 $noreg, %x, 1, 3 /* e8 */
 ...
+---
+name: vmerge_vim
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmerge_vim
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VIM_M1 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+    %y:vrnov0 = PseudoVMERGE_VIM_M1 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+...
+---
+name: vmerge_vim_incompatible_eew
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmerge_vim_incompatible_eew
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VIM_M1 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
+    %y:vrnov0 = PseudoVMERGE_VIM_M1 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+...
+---
+name: vmerge_vim_incompatible_emul
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmerge_vim_incompatible_emul
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VIM_MF2 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+    %y:vrnov0 = PseudoVMERGE_VIM_MF2 $noreg, %x, 9, $v0, 1, 3 /* e8 */
+...
+---
+name: vmerge_vxm
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmerge_vxm
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VXM_M1 $noreg, %x, $noreg, $v0, 1, 3 /* e8 */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+    %y:vrnov0 = PseudoVMERGE_VXM_M1 $noreg, %x, $noreg, $v0, 1, 3 /* e8 */
+...
+---
+name: vmerge_vxm_incompatible_eew
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmerge_vxm_incompatible_eew
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VXM_M1 $noreg, %x, $noreg, $v0, 1, 3 /* e8 */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
+    %y:vrnov0 = PseudoVMERGE_VXM_M1 $noreg, %x, $noreg, $v0, 1, 3 /* e8 */
+...
+---
+name: vmerge_vxm_incompatible_emul
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmerge_vxm_incompatible_emul
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VXM_MF2 $noreg, %x, $noreg, $v0, 1, 3 /* e8 */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+    %y:vrnov0 = PseudoVMERGE_VXM_MF2 $noreg, %x, $noreg, $v0, 1, 3 /* e8 */
+...
+---
+name: vmerge_vvm
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmerge_vvm
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+    %y:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+...
+---
+name: vmerge_vvm_incompatible_eew
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmerge_vvm_incompatible_eew
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
+    %y:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+...
+---
+name: vmerge_vvm_incompatible_emul
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmerge_vvm_incompatible_emul
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vrnov0 = PseudoVMERGE_VVM_MF2 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+    %y:vrnov0 = PseudoVMERGE_VVM_MF2 $noreg, $noreg, %x, $v0, 1, 3 /* e8 */
+...
+---
+name: vmv_v_i
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmv_v_i
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vr = PseudoVMV_V_I_M1 %x, 9, 1, 3 /* e8 */, 0 /* tu, mu */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+    %y:vr = PseudoVMV_V_I_M1 %x, 9, 1, 3 /* e8 */, 0
+...
+---
+name: vmv_v_i_incompatible_eew
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmv_v_i_incompatible_eew
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vr = PseudoVMV_V_I_M1 %x, 9, 1, 3 /* e8 */, 0 /* tu, mu */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
+    %y:vr = PseudoVMV_V_I_M1 %x, 9, 1, 3 /* e8 */, 0
+...
+---
+name: vmv_v_i_incompatible_emul
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmv_v_i_incompatible_emul
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vr = PseudoVMV_V_I_MF2 %x, 9, 1, 3 /* e8 */, 0 /* tu, mu */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+    %y:vr = PseudoVMV_V_I_MF2 %x, 9, 1, 3 /* e8 */, 0
+...
+---
+name: vmv_v_x
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmv_v_x
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vr = PseudoVMV_V_X_M1 %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+    %y:vr = PseudoVMV_V_X_M1 %x, $noreg, 1, 3 /* e8 */, 0
+...
+---
+name: vmv_v_x_incompatible_eew
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmv_v_x_incompatible_eew
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vr = PseudoVMV_V_X_M1 %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
+    %y:vr = PseudoVMV_V_X_M1 %x, $noreg, 1, 3 /* e8 */, 0
+...
+---
+name: vmv_v_x_incompatible_emul
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmv_v_x_incompatible_emul
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vr = PseudoVMV_V_X_MF2 %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+    %y:vr = PseudoVMV_V_X_MF2 %x, $noreg, 1, 3 /* e8 */, 0
+...
+---
+name: vmv_v_v
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmv_v_v
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vr = PseudoVMV_V_V_M1 $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+    %y:vr = PseudoVMV_V_V_M1 $noreg, %x, 1, 3 /* e8 */, 0
+...
+---
+name: vmv_v_v_incompatible_eew
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmv_v_v_incompatible_eew
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vr = PseudoVMV_V_V_M1 $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
+    %y:vr = PseudoVMV_V_V_M1 $noreg, %x, 1, 3 /* e8 */, 0
+...
+---
+name: vmv_v_v_incompatible_emul
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: vmv_v_v_incompatible_emul
+    ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vr = PseudoVMV_V_V_MF2 $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+    %y:vr = PseudoVMV_V_V_MF2 $noreg, %x, 1, 3 /* e8 */, 0
+...


        

