[llvm] a81dff1 - [RISCV] Support inline asm for vector instructions.

Hsiangkai Wang via llvm-commits llvm-commits at lists.llvm.org
Sun Mar 14 20:03:06 PDT 2021


Author: Hsiangkai Wang
Date: 2021-03-15T11:02:18+08:00
New Revision: a81dff1e58390ccd438b9d5ffeb289783f2ccd29

URL: https://github.com/llvm/llvm-project/commit/a81dff1e58390ccd438b9d5ffeb289783f2ccd29
DIFF: https://github.com/llvm/llvm-project/commit/a81dff1e58390ccd438b9d5ffeb289783f2ccd29.diff

LOG: [RISCV] Support inline asm for vector instructions.

Types with fractional LMUL and LMUL=1 all use the VR register class. When
using inline asm, the first type in the register class is used as the type
for the register, which is not necessarily the same as the value type. We
need to use INSERT_SUBVECTOR/EXTRACT_SUBVECTOR/BITCAST to make it legal
to put the value in the corresponding register class.

Differential Revision: https://reviews.llvm.org/D97480
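
An illustrative sketch (not part of the commit itself): assuming matching
front-end support for the 'v' constraint and the <riscv_vector.h> types,
C code like the following would produce the "=v,v,v" asm calls exercised
by the new test, with getRegForInlineAsmConstraint choosing VR, VRM2,
VRM4, or VRM8 based on the value type:

    #include <riscv_vector.h>

    // 'v' requests a vector register; vint32m1_t is an LMUL=1 type, so
    // the operands are placed in the VR register class. An LMUL=2 type
    // such as vint32m2_t would select VRM2 instead.
    vint32m1_t vadd_asm(vint32m1_t a, vint32m1_t b) {
      vint32m1_t r;
      asm("vadd.vv %0, %1, %2" : "=v"(r) : "v"(a), "v"(b));
      return r;
    }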

Added: 
    llvm/test/CodeGen/RISCV/rvv/inline-asm.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h
    llvm/lib/Target/RISCV/RISCVRegisterInfo.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index c567464c37cf..aa4863f3dcfb 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6215,6 +6215,7 @@ RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
     default:
       break;
     case 'f':
+    case 'v':
       return C_RegisterClass;
     case 'I':
     case 'J':
@@ -6245,6 +6246,14 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
       if (Subtarget.hasStdExtD() && VT == MVT::f64)
         return std::make_pair(0U, &RISCV::FPR64RegClass);
       break;
+    case 'v':
+      for (const auto *RC :
+           {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass,
+            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
+        if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
+          return std::make_pair(0U, RC);
+      }
+      break;
     default:
       break;
     }
@@ -6344,6 +6353,56 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
     }
   }
 
+  if (Subtarget.hasStdExtV()) {
+    Register VReg = StringSwitch<Register>(Constraint.lower())
+                        .Case("{v0}", RISCV::V0)
+                        .Case("{v1}", RISCV::V1)
+                        .Case("{v2}", RISCV::V2)
+                        .Case("{v3}", RISCV::V3)
+                        .Case("{v4}", RISCV::V4)
+                        .Case("{v5}", RISCV::V5)
+                        .Case("{v6}", RISCV::V6)
+                        .Case("{v7}", RISCV::V7)
+                        .Case("{v8}", RISCV::V8)
+                        .Case("{v9}", RISCV::V9)
+                        .Case("{v10}", RISCV::V10)
+                        .Case("{v11}", RISCV::V11)
+                        .Case("{v12}", RISCV::V12)
+                        .Case("{v13}", RISCV::V13)
+                        .Case("{v14}", RISCV::V14)
+                        .Case("{v15}", RISCV::V15)
+                        .Case("{v16}", RISCV::V16)
+                        .Case("{v17}", RISCV::V17)
+                        .Case("{v18}", RISCV::V18)
+                        .Case("{v19}", RISCV::V19)
+                        .Case("{v20}", RISCV::V20)
+                        .Case("{v21}", RISCV::V21)
+                        .Case("{v22}", RISCV::V22)
+                        .Case("{v23}", RISCV::V23)
+                        .Case("{v24}", RISCV::V24)
+                        .Case("{v25}", RISCV::V25)
+                        .Case("{v26}", RISCV::V26)
+                        .Case("{v27}", RISCV::V27)
+                        .Case("{v28}", RISCV::V28)
+                        .Case("{v29}", RISCV::V29)
+                        .Case("{v30}", RISCV::V30)
+                        .Case("{v31}", RISCV::V31)
+                        .Default(RISCV::NoRegister);
+    if (VReg != RISCV::NoRegister) {
+      if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
+        return std::make_pair(VReg, &RISCV::VMRegClass);
+      if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
+        return std::make_pair(VReg, &RISCV::VRRegClass);
+      for (const auto *RC :
+           {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
+        if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
+          VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
+          return std::make_pair(VReg, RC);
+        }
+      }
+    }
+  }
+
   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
 }
 
@@ -6702,6 +6761,65 @@ bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
   return false;
 }
 
+bool RISCVTargetLowering::splitValueIntoRegisterParts(
+    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
+    unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
+  EVT ValueVT = Val.getValueType();
+  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
+    LLVMContext &Context = *DAG.getContext();
+    EVT ValueEltVT = ValueVT.getVectorElementType();
+    EVT PartEltVT = PartVT.getVectorElementType();
+    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
+    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
+    if (PartVTBitSize % ValueVTBitSize == 0) {
+      // If the element types are different, bitcast to the same element type of
+      // PartVT first.
+      if (ValueEltVT != PartEltVT) {
+        unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
+        assert(Count != 0 && "The number of elements should not be zero.");
+        EVT SameEltTypeVT =
+            EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
+        Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
+      }
+      Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
+                        Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
+      Parts[0] = Val;
+      return true;
+    }
+  }
+  return false;
+}
+
+SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
+    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
+    MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
+  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
+    LLVMContext &Context = *DAG.getContext();
+    SDValue Val = Parts[0];
+    EVT ValueEltVT = ValueVT.getVectorElementType();
+    EVT PartEltVT = PartVT.getVectorElementType();
+    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
+    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
+    if (PartVTBitSize % ValueVTBitSize == 0) {
+      EVT SameEltTypeVT = ValueVT;
+      // If the element types are different, convert it to the same element type
+      // of PartVT.
+      if (ValueEltVT != PartEltVT) {
+        unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
+        assert(Count != 0 && "The number of elements should not be zero.");
+        SameEltTypeVT =
+            EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
+      }
+      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
+                        DAG.getConstant(0, DL, Subtarget.getXLenVT()));
+      if (ValueEltVT != PartEltVT)
+        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
+      return Val;
+    }
+  }
+  return SDValue();
+}
+
 #define GET_REGISTER_MATCHER
 #include "RISCVGenAsmMatcher.inc"
 

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index fbc01b3ced03..dd0e72fcf84e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -406,6 +406,17 @@ class RISCVTargetLowering : public TargetLowering {
       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
       bool *Fast = nullptr) const override;
 
+  bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL,
+                                   SDValue Val, SDValue *Parts,
+                                   unsigned NumParts, MVT PartVT,
+                                   Optional<CallingConv::ID> CC) const override;
+
+  SDValue
+  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
+                             const SDValue *Parts, unsigned NumParts,
+                             MVT PartVT, EVT ValueVT,
+                             Optional<CallingConv::ID> CC) const override;
+
   static RISCVVLMUL getLMUL(MVT VT);
   static unsigned getRegClassIDForLMUL(RISCVVLMUL LMul);
   static unsigned getSubregIndexByMVT(MVT VT, unsigned Index);

diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 4f5a305f7af9..76f91b0a0377 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -466,22 +466,22 @@ class VReg<list<ValueType> regTypes, dag regList, int Vlmul>
   int Size = !mul(Vlmul, 64);
 }
 
-def VR : VReg<[vint8mf2_t, vint8mf4_t, vint8mf8_t,
+def VR : VReg<[vint8m1_t, vint16m1_t, vint32m1_t, vint64m1_t,
+               vfloat16m1_t, vfloat32m1_t, vfloat64m1_t,
+               vint8mf2_t, vint8mf4_t, vint8mf8_t,
                vint16mf2_t, vint16mf4_t, vint32mf2_t,
-               vint8m1_t, vint16m1_t, vint32m1_t, vint64m1_t,
-               vfloat16mf4_t, vfloat16mf2_t, vfloat16m1_t,
-               vfloat32mf2_t, vfloat32m1_t, vfloat64m1_t,
+               vfloat16mf4_t, vfloat16mf2_t, vfloat32mf2_t,
                vbool64_t, vbool32_t, vbool16_t, vbool8_t, vbool4_t,
                vbool2_t, vbool1_t],
            (add (sequence "V%u", 25, 31),
                 (sequence "V%u", 8, 24),
                 (sequence "V%u", 0, 7)), 1>;
 
-def VRNoV0 : VReg<[vint8mf2_t, vint8mf4_t, vint8mf8_t,
+def VRNoV0 : VReg<[vint8m1_t, vint16m1_t, vint32m1_t, vint64m1_t,
+                   vfloat16m1_t, vfloat32m1_t, vfloat64m1_t,
+                   vint8mf2_t, vint8mf4_t, vint8mf8_t,
                    vint16mf2_t, vint16mf4_t, vint32mf2_t,
-                   vint8m1_t, vint16m1_t, vint32m1_t, vint64m1_t,
-                   vfloat16mf4_t, vfloat16mf2_t, vfloat16m1_t,
-                   vfloat32mf2_t, vfloat32m1_t, vfloat64m1_t,
+                   vfloat16mf4_t, vfloat16mf2_t, vfloat32mf2_t,
                    vbool64_t, vbool32_t, vbool16_t, vbool8_t, vbool4_t,
                    vbool2_t, vbool1_t],
                (add (sequence "V%u", 25, 31),
@@ -521,6 +521,13 @@ def VMV0 : RegisterClass<"RISCV", VMaskVTs, 64, (add V0)> {
   let Size = 64;
 }
 
+// This register class is added to support inline assembly for vector mask types.
+def VM : VReg<[vbool1_t, vbool2_t, vbool4_t, vbool8_t, vbool16_t,
+               vbool32_t, vbool64_t],
+           (add (sequence "V%u", 25, 31),
+                (sequence "V%u", 8, 24),
+                (sequence "V%u", 0, 7)), 1>;
+
 foreach m = LMULList.m in {
   foreach nf = NFList<m>.L in {
     def "VRN" # nf # "M" # m: VReg<[untyped],

diff --git a/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll b/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
new file mode 100644
index 000000000000..624eb188a498
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
@@ -0,0 +1,410 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v < %s \
+; RUN:     --verify-machineinstrs | FileCheck %s
+
+define <vscale x 1 x i1> @test_1xi1(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2) nounwind {
+; CHECK-LABEL: test_1xi1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2)
+  ret <vscale x 1 x i1> %0
+}
+
+define <vscale x 2 x i1> @test_2xi1(<vscale x 2 x i1> %in, <vscale x 2 x i1> %in2) nounwind {
+; CHECK-LABEL: test_2xi1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 2 x i1> %in, <vscale x 2 x i1> %in2)
+  ret <vscale x 2 x i1> %0
+}
+
+define <vscale x 4 x i1> @test_4xi1(<vscale x 4 x i1> %in, <vscale x 4 x i1> %in2) nounwind {
+; CHECK-LABEL: test_4xi1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 4 x i1> %in, <vscale x 4 x i1> %in2)
+  ret <vscale x 4 x i1> %0
+}
+
+define <vscale x 8 x i1> @test_8xi1(<vscale x 8 x i1> %in, <vscale x 8 x i1> %in2) nounwind {
+; CHECK-LABEL: test_8xi1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 8 x i1> %in, <vscale x 8 x i1> %in2)
+  ret <vscale x 8 x i1> %0
+}
+
+define <vscale x 16 x i1> @test_16xi1(<vscale x 16 x i1> %in, <vscale x 16 x i1> %in2) nounwind {
+; CHECK-LABEL: test_16xi1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 16 x i1> %in, <vscale x 16 x i1> %in2)
+  ret <vscale x 16 x i1> %0
+}
+
+define <vscale x 32 x i1> @test_32xi1(<vscale x 32 x i1> %in, <vscale x 32 x i1> %in2) nounwind {
+; CHECK-LABEL: test_32xi1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 32 x i1> %in, <vscale x 32 x i1> %in2)
+  ret <vscale x 32 x i1> %0
+}
+
+define <vscale x 64 x i1> @test_64xi1(<vscale x 64 x i1> %in, <vscale x 64 x i1> %in2) nounwind {
+; CHECK-LABEL: test_64xi1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 64 x i1> %in, <vscale x 64 x i1> %in2)
+  ret <vscale x 64 x i1> %0
+}
+
+define <vscale x 1 x i64> @test_1xi64(<vscale x 1 x i64> %in, <vscale x 1 x i64> %in2) nounwind {
+; CHECK-LABEL: test_1xi64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 1 x i64> %in, <vscale x 1 x i64> %in2)
+  ret <vscale x 1 x i64> %0
+}
+
+define <vscale x 2 x i64> @test_2xi64(<vscale x 2 x i64> %in, <vscale x 2 x i64> %in2) nounwind {
+; CHECK-LABEL: test_2xi64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i64> %in, <vscale x 2 x i64> %in2)
+  ret <vscale x 2 x i64> %0
+}
+
+define <vscale x 4 x i64> @test_4xi64(<vscale x 4 x i64> %in, <vscale x 4 x i64> %in2) nounwind {
+; CHECK-LABEL: test_4xi64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 4 x i64> %in, <vscale x 4 x i64> %in2)
+  ret <vscale x 4 x i64> %0
+}
+
+define <vscale x 8 x i64> @test_8xi64(<vscale x 8 x i64> %in, <vscale x 8 x i64> %in2) nounwind {
+; CHECK-LABEL: test_8xi64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 8 x i64> %in, <vscale x 8 x i64> %in2)
+  ret <vscale x 8 x i64> %0
+}
+
+define <vscale x 1 x i32> @test_1xi32(<vscale x 1 x i32> %in, <vscale x 1 x i32> %in2) nounwind {
+; CHECK-LABEL: test_1xi32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 1 x i32> %in, <vscale x 1 x i32> %in2)
+  ret <vscale x 1 x i32> %0
+}
+
+define <vscale x 2 x i32> @test_2xi32(<vscale x 2 x i32> %in, <vscale x 2 x i32> %in2) nounwind {
+; CHECK-LABEL: test_2xi32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i32> %in, <vscale x 2 x i32> %in2)
+  ret <vscale x 2 x i32> %0
+}
+
+define <vscale x 4 x i32> @test_4xi32(<vscale x 4 x i32> %in, <vscale x 4 x i32> %in2) nounwind {
+; CHECK-LABEL: test_4xi32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 4 x i32> %in, <vscale x 4 x i32> %in2)
+  ret <vscale x 4 x i32> %0
+}
+
+define <vscale x 8 x i32> @test_8xi32(<vscale x 8 x i32> %in, <vscale x 8 x i32> %in2) nounwind {
+; CHECK-LABEL: test_8xi32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 8 x i32> %in, <vscale x 8 x i32> %in2)
+  ret <vscale x 8 x i32> %0
+}
+
+define <vscale x 16 x i32> @test_16xi32(<vscale x 16 x i32> %in, <vscale x 16 x i32> %in2) nounwind {
+; CHECK-LABEL: test_16xi32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 16 x i32> %in, <vscale x 16 x i32> %in2)
+  ret <vscale x 16 x i32> %0
+}
+
+define <vscale x 1 x i16> @test_1xi16(<vscale x 1 x i16> %in, <vscale x 1 x i16> %in2) nounwind {
+; CHECK-LABEL: test_1xi16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 1 x i16> %in, <vscale x 1 x i16> %in2)
+  ret <vscale x 1 x i16> %0
+}
+
+define <vscale x 2 x i16> @test_2xi16(<vscale x 2 x i16> %in, <vscale x 2 x i16> %in2) nounwind {
+; CHECK-LABEL: test_2xi16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i16> %in, <vscale x 2 x i16> %in2)
+  ret <vscale x 2 x i16> %0
+}
+
+define <vscale x 4 x i16> @test_4xi16(<vscale x 4 x i16> %in, <vscale x 4 x i16> %in2) nounwind {
+; CHECK-LABEL: test_4xi16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 4 x i16> %in, <vscale x 4 x i16> %in2)
+  ret <vscale x 4 x i16> %0
+}
+
+define <vscale x 8 x i16> @test_8xi16(<vscale x 8 x i16> %in, <vscale x 8 x i16> %in2) nounwind {
+; CHECK-LABEL: test_8xi16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 8 x i16> %in, <vscale x 8 x i16> %in2)
+  ret <vscale x 8 x i16> %0
+}
+
+define <vscale x 16 x i16> @test_16xi16(<vscale x 16 x i16> %in, <vscale x 16 x i16> %in2) nounwind {
+; CHECK-LABEL: test_16xi16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 16 x i16> %in, <vscale x 16 x i16> %in2)
+  ret <vscale x 16 x i16> %0
+}
+
+define <vscale x 32 x i16> @test_32xi16(<vscale x 32 x i16> %in, <vscale x 32 x i16> %in2) nounwind {
+; CHECK-LABEL: test_32xi16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 32 x i16> %in, <vscale x 32 x i16> %in2)
+  ret <vscale x 32 x i16> %0
+}
+
+define <vscale x 1 x i8> @test_1xi8(<vscale x 1 x i8> %in, <vscale x 1 x i8> %in2) nounwind {
+; CHECK-LABEL: test_1xi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 1 x i8> %in, <vscale x 1 x i8> %in2)
+  ret <vscale x 1 x i8> %0
+}
+
+define <vscale x 2 x i8> @test_2xi8(<vscale x 2 x i8> %in, <vscale x 2 x i8> %in2) nounwind {
+; CHECK-LABEL: test_2xi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i8> %in, <vscale x 2 x i8> %in2)
+  ret <vscale x 2 x i8> %0
+}
+
+define <vscale x 4 x i8> @test_4xi8(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2) nounwind {
+; CHECK-LABEL: test_4xi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2)
+  ret <vscale x 4 x i8> %0
+}
+
+define <vscale x 8 x i8> @test_8xi8(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2) nounwind {
+; CHECK-LABEL: test_8xi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2)
+  ret <vscale x 8 x i8> %0
+}
+
+define <vscale x 16 x i8> @test_16xi8(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2) nounwind {
+; CHECK-LABEL: test_16xi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2)
+  ret <vscale x 16 x i8> %0
+}
+
+define <vscale x 32 x i8> @test_32xi8(<vscale x 32 x i8> %in, <vscale x 32 x i8> %in2) nounwind {
+; CHECK-LABEL: test_32xi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 32 x i8> %in, <vscale x 32 x i8> %in2)
+  ret <vscale x 32 x i8> %0
+}
+
+define <vscale x 64 x i8> @test_64xi8(<vscale x 64 x i8> %in, <vscale x 64 x i8> %in2) nounwind {
+; CHECK-LABEL: test_64xi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 64 x i8> %in, <vscale x 64 x i8> %in2)
+  ret <vscale x 64 x i8> %0
+}
+
+define <vscale x 4 x i8> @test_specify_reg_mf2(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2) nounwind {
+; CHECK-LABEL: test_specify_reg_mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v2, v9
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v0, v1, v2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    vmv1r.v v8, v0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> asm "vadd.vv $0, $1, $2", "={v0},{v1},{v2}"(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2)
+  ret <vscale x 4 x i8> %0
+}
+
+define <vscale x 8 x i8> @test_specify_reg_m1(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2) nounwind {
+; CHECK-LABEL: test_specify_reg_m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v2, v9
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v0, v1, v2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    vmv1r.v v8, v0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> asm "vadd.vv $0, $1, $2", "={v0},{v1},{v2}"(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2)
+  ret <vscale x 8 x i8> %0
+}
+
+define <vscale x 16 x i8> @test_specify_reg_m2(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2) nounwind {
+; CHECK-LABEL: test_specify_reg_m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v4, v10
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v0, v2, v4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    vmv2r.v v8, v0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> asm "vadd.vv $0, $1, $2", "={v0},{v2},{v4}"(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2)
+  ret <vscale x 16 x i8> %0
+}
+
+define <vscale x 1 x i1> @test_specify_reg_mask(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2) nounwind {
+; CHECK-LABEL: test_specify_reg_mask:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v2, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vmand.mm v0, v1, v2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i1> asm "vmand.mm $0, $1, $2", "={v0},{v1},{v2}"(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2)
+  ret <vscale x 1 x i1> %0
+}


        

