[llvm] 0c94915 - [RISCV] Use _B* suffix for vector mask logic pseudo instructions. (#119787)

via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 12 21:11:06 PST 2024


Author: Craig Topper
Date: 2024-12-12T21:11:01-08:00
New Revision: 0c94915d34e6934c04140bb908364e54d1bc8ada

URL: https://github.com/llvm/llvm-project/commit/0c94915d34e6934c04140bb908364e54d1bc8ada
DIFF: https://github.com/llvm/llvm-project/commit/0c94915d34e6934c04140bb908364e54d1bc8ada.diff

LOG: [RISCV] Use _B* suffix for vector mask logic pseudo instructions. (#119787)

Replace LMUL suffixes with _B1, _B2, etc. This matches what we do
for other mask only instructions like VCPOP_M, VFIRST_M, VMSBF_M,
VLM, VSM, etc.

Now all pseudoinstructions that use Log2SEW=0 will be consistently
named.

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
    llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 7ae68ebadd3e85..f43c120dc1946a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1670,7 +1670,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
                              : RISCV::PseudoVMSLT_VX_##suffix;                 \
     VMSGTOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix                 \
                              : RISCV::PseudoVMSGT_VX_##suffix;                 \
-    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
+    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix_b;                          \
     VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
     break;
         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B64)
@@ -1770,13 +1770,13 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
     VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
     break;
-        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, MF8)
-        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, MF4)
-        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, MF2)
-        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, M1)
-        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, M2)
-        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, M4)
-        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, M8)
+        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, B64)
+        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, B32)
+        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, B16)
+        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, B8)
+        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, B4)
+        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, B2)
+        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, B1)
 #undef CASE_VMXOR_VMANDN_VMOR_OPCODES
       }
       SDValue SEW = CurDAG->getTargetConstant(

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 4e25d683faee9d..81b93ade925761 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2273,11 +2273,10 @@ multiclass VPseudoBinaryV_VI_RM<Operand ImmType, LMULInfo m, string Constraint =
 }
 
 multiclass VPseudoVALU_MM<bit Commutable = 0> {
-  foreach m = MxList in {
-    defvar mx = m.MX;
-    let VLMul = m.value, isCommutable = Commutable in {
-      def "_MM_" # mx : VPseudoBinaryNoMask<VR, VR, VR, "">,
-                        SchedBinary<"WriteVMALUV", "ReadVMALUV", "ReadVMALUV", mx>;
+  foreach mti = AllMasks in {
+    let VLMul = mti.LMul.value, isCommutable = Commutable in {
+      def "_MM_" # mti.BX : VPseudoBinaryNoMask<VR, VR, VR, "">,
+        SchedBinary<"WriteVMALUV", "ReadVMALUV", "ReadVMALUV", mti.LMul.MX>;
     }
   }
 }
@@ -4950,7 +4949,7 @@ multiclass VPatBinaryV_VI_RM<string intrinsic, string instruction,
 multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
   foreach mti = AllMasks in
     let Predicates = [HasVInstructions] in
-    def : VPatBinaryM<intrinsic, instruction # "_MM_" # mti.LMul.MX,
+    def : VPatBinaryM<intrinsic, instruction # "_MM_" # mti.BX,
                       mti.Mask, mti.Mask, mti.Mask,
                       mti.Log2SEW, VR, VR>;
 }

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 021c4b3b724b02..880ea0ae0a976c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -1141,35 +1141,35 @@ defm : VPatAVGADD_VV_VX_RM<avgceilu, 0b00, suffix = "U">;
 foreach mti = AllMasks in {
   let Predicates = [HasVInstructions] in {
     def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
-              (!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMAND_MM_"#mti.BX)
                    VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
     def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
-              (!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMOR_MM_"#mti.BX)
                    VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
     def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
-              (!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMXOR_MM_"#mti.BX)
                    VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
 
     def : Pat<(mti.Mask (rvv_vnot (and VR:$rs1, VR:$rs2))),
-              (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMNAND_MM_"#mti.BX)
                    VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
     def : Pat<(mti.Mask (rvv_vnot (or VR:$rs1, VR:$rs2))),
-              (!cast<Instruction>("PseudoVMNOR_MM_"#mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMNOR_MM_"#mti.BX)
                    VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
     def : Pat<(mti.Mask (rvv_vnot (xor VR:$rs1, VR:$rs2))),
-              (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.BX)
                    VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
 
     def : Pat<(mti.Mask (and VR:$rs1, (rvv_vnot VR:$rs2))),
-              (!cast<Instruction>("PseudoVMANDN_MM_"#mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMANDN_MM_"#mti.BX)
                    VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
     def : Pat<(mti.Mask (or VR:$rs1, (rvv_vnot VR:$rs2))),
-              (!cast<Instruction>("PseudoVMORN_MM_"#mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMORN_MM_"#mti.BX)
                    VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
 
     // Handle rvv_vnot the same as the vmnot.m pseudoinstruction.
     def : Pat<(mti.Mask (rvv_vnot VR:$rs)),
-              (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMNAND_MM_"#mti.BX)
                    VR:$rs, VR:$rs, mti.AVL, mti.Log2SEW)>;
   }
 }

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index e48a6f9309294b..2026ba79e623d8 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -2699,51 +2699,51 @@ foreach mti = AllMasks in {
               (!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
 
     def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)),
-              (!cast<Instruction>("PseudoVMAND_MM_" # mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMAND_MM_" # mti.BX)
                    VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
     def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
-              (!cast<Instruction>("PseudoVMOR_MM_" # mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMOR_MM_" # mti.BX)
                    VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
     def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
-              (!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMXOR_MM_" # mti.BX)
                    VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
 
     def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1,
                                         (riscv_vmnot_vl VR:$rs2, VLOpFrag),
                                         VLOpFrag)),
-              (!cast<Instruction>("PseudoVMANDN_MM_" # mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMANDN_MM_" # mti.BX)
                    VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
     def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1,
                                        (riscv_vmnot_vl VR:$rs2, VLOpFrag),
                                        VLOpFrag)),
-              (!cast<Instruction>("PseudoVMORN_MM_" # mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMORN_MM_" # mti.BX)
                    VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
     // XOR is associative so we need 2 patterns for VMXNOR.
     def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1,
                                                         VLOpFrag),
                                        VR:$rs2, VLOpFrag)),
-              (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.BX)
                    VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
 
     def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2,
                                                         VLOpFrag),
                                         VLOpFrag)),
-              (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMNAND_MM_" # mti.BX)
                    VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
     def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2,
                                                        VLOpFrag),
                                         VLOpFrag)),
-              (!cast<Instruction>("PseudoVMNOR_MM_" # mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMNOR_MM_" # mti.BX)
                    VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
     def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2,
                                                         VLOpFrag),
                                         VLOpFrag)),
-              (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.BX)
                    VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
 
     // Match the not idiom to the vmnot.m pseudo.
     def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)),
-              (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
+              (!cast<Instruction>("PseudoVMNAND_MM_" # mti.BX)
                    VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>;
 
     // 15.2 Vector count population in mask vcpop.m

diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index 912fc2ccbd1270..5f23823d101031 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -547,19 +547,19 @@ name: vmop_mm
 body: |
   bb.0:
     ; CHECK-LABEL: name: vmop_mm
-    ; CHECK: %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, 1, 0 /* e8 */
-    ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 0 /* e8 */
-    %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
-    %y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 0
+    ; CHECK: %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, 1, 0 /* e8 */
+    ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0 /* e8 */
+    %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
+    %y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0
 ...
 ---
 name: vmop_mm_incompatible_eew
 body: |
   bb.0:
     ; CHECK-LABEL: name: vmop_mm_incompatible_eew
-    ; CHECK: %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0 /* e8 */
+    ; CHECK: %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0 /* e8 */
     ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
-    %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
+    %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
     %y:vr = PseudoVADD_VV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
 ...
 ---
@@ -567,19 +567,19 @@ name: vmop_mm_incompatible_emul
 body: |
   bb.0:
     ; CHECK-LABEL: name: vmop_mm_incompatible_emul
-    ; CHECK: %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0 /* e8 */
-    ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_MF2 $noreg, %x, 1, 0 /* e8 */
-    %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
-    %y:vr = PseudoVMAND_MM_MF2 $noreg, %x, 1, 0
+    ; CHECK: %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0 /* e8 */
+    ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0 /* e8 */
+    %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
+    %y:vr = PseudoVMAND_MM_B16  $noreg, %x, 1, 0
 ...
 ---
 name: vmop_mm_mask
 body: |
   bb.0:
     ; CHECK-LABEL: name: vmop_mm_mask
-    ; CHECK: %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, 1, 0 /* e8 */
+    ; CHECK: %x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, 1, 0 /* e8 */
     ; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
-    %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
+    %x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
     %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
 ...
 ---
@@ -587,9 +587,9 @@ name: vmop_mm_mask_larger_emul_user
 body: |
   bb.0:
     ; CHECK-LABEL: name: vmop_mm_mask_larger_emul_user
-    ; CHECK: %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, 1, 0 /* e8 */
+    ; CHECK: %x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, 1, 0 /* e8 */
     ; CHECK-NEXT: %y:vrm2nov0 = PseudoVADD_VV_M2_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
-    %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
+    %x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
     %y:vrm2nov0 = PseudoVADD_VV_M2_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0
 ...
 ---
@@ -597,9 +597,9 @@ name: vmop_mm_mask_incompatible_emul
 body: |
   bb.0:
     ; CHECK-LABEL: name: vmop_mm_mask_incompatible_emul
-    ; CHECK: %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0 /* e8 */
+    ; CHECK: %x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0 /* e8 */
     ; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
-    %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
+    %x:vmv0 = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
     %y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
 ...
 ---
@@ -608,9 +608,9 @@ body: |
   bb.0:
     ; CHECK-LABEL: name: vmop_vv
     ; CHECK: %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, 1, 3 /* e8 */
-    ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 0 /* e8 */
+    ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0 /* e8 */
     %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 3 /* e8 */
-    %y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 0
+    %y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0
 ...
 ---
 name: vmop_vv_maskuser
@@ -638,9 +638,9 @@ body: |
   bb.0:
     ; CHECK-LABEL: name: vmop_vv_incompatible_emul
     ; CHECK: %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 3 /* e8 */
-    ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_MF2 $noreg, %x, 1, 0 /* e8 */
+    ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0 /* e8 */
     %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 3 /* e8 */
-    %y:vr = PseudoVMAND_MM_MF2 $noreg, %x, 1, 0
+    %y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0
 ...
 ---
 name: vmop_vv_maskuser_incompaible_emul

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
index 6f97abcd0fadec..449ee44d5dc5ed 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -822,7 +822,7 @@ body:             |
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   %mask:vr = PseudoVMANDN_MM_MF8 %t6, %t3, -1, 0 /* e8 */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   %mask:vr = PseudoVMANDN_MM_B64 %t6, %t3, -1, 0 /* e8 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   BEQ %a, $x0, %bb.3
   ; CHECK-NEXT:   PseudoBR %bb.2
   ; CHECK-NEXT: {{  $}}
@@ -857,7 +857,7 @@ body:             |
   bb.1:
     successors: %bb.3, %bb.2
 
-    %mask:vr = PseudoVMANDN_MM_MF8 %t6, %t3, -1, 0
+    %mask:vr = PseudoVMANDN_MM_B64 %t6, %t3, -1, 0
     %t2:gpr = COPY $x0
     BEQ %a, %t2, %bb.3
     PseudoBR %bb.2


        


More information about the llvm-commits mailing list