commit 9719b8e203841b4b69a3d52919dd442a203b19e4
Author: Ana Pazos
Date:   Wed Nov 13 11:57:09 2013 -0800

    Implemented aarch64 vmul_lane intrinsics
    Implemented aarch64 Neon scalar vmulx_lane intrinsics
    Implemented aarch64 Neon scalar vfma_lane intrinsics
    Implemented aarch64 Neon scalar vfms_lane intrinsics

    Implemented legacy vmul_n_f64, vmul_lane_f64, vmul_laneq_f64 intrinsics
    (v1f64 parameter type) using Neon scalar instructions.

    Implemented legacy vfma_lane_f64, vfms_lane_f64, vfma_laneq_f64,
    vfms_laneq_f64 intrinsics (v1f64 parameter type) using Neon scalar
    instructions.
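For context (not part of the patch itself): a minimal usage sketch of the new
scalar-by-element intrinsics. The expected instruction selection in the
comments mirrors the CHECK lines of the new test file
test/CodeGen/aarch64-neon-scalar-x-indexed-elem.c added below; the function
names scale_by_lane and fma_by_lane are made up for illustration.

    #include <arm_neon.h>

    // Scalar multiply by lane 1 of a 128-bit vector: fmul d0, d0, v1.d[1]
    float64_t scale_by_lane(float64_t a, float64x2_t v) {
      return vmuld_laneq_f64(a, v, 1);
    }

    // Scalar fused multiply-add by lane 1 of a 64-bit vector:
    //   fmla s0, s1, v2.s[1]
    float32_t fma_by_lane(float32_t acc, float32_t x, float32x2_t v) {
      return vfmas_lane_f32(acc, x, v, 1);
    }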
diff --git a/include/clang/Basic/arm_neon.td b/include/clang/Basic/arm_neon.td
index cfde227..30d7ab9 100644
--- a/include/clang/Basic/arm_neon.td
+++ b/include/clang/Basic/arm_neon.td
@@ -107,6 +107,12 @@ def OP_MOVL_HI : Op;
 def OP_COPY_LN : Op;
 def OP_COPYQ_LN : Op;
 def OP_COPY_LNQ : Op;
+def OP_SCALAR_MUL_LN : Op;
+def OP_SCALAR_MUL_LNQ : Op;
+def OP_SCALAR_MULX_LN : Op;
+def OP_SCALAR_MULX_LNQ : Op;
+def OP_SCALAR_VMULX_LN : Op;
+def OP_SCALAR_VMULX_LNQ : Op;
 
 class Inst <string n, string p, string t, Operator o> {
   string Name = n;
@@ -743,11 +749,13 @@ def VQDMLSL_HIGH_LANE : SOpInst<"vqdmlsl_high_lane", "wwkdi", "si",
 def VQDMLSL_HIGH_LANEQ : SOpInst<"vqdmlsl_high_laneq", "wwkki", "si",
                                  OP_QDMLSLHi_LN>;
 
-// Newly add double parameter for vmul_lane in aarch64
-def VMUL_LANE_A64 : IOpInst<"vmul_lane", "ddgi", "dQd", OP_MUL_LN>;
+// Newly add double parameter for vmul_lane in aarch64
+// Note: d type is handled by SCALAR_VMUL_LANE
+def VMUL_LANE_A64 : IOpInst<"vmul_lane", "ddgi", "Qd", OP_MUL_LN>;
+// Note: d type is handled by SCALAR_VMUL_LANEQ
 def VMUL_LANEQ : IOpInst<"vmul_laneq", "ddji",
-                         "sifdUsUiQsQiQfQUsQUiQfQd", OP_MUL_LN>;
+                         "sifUsUiQsQiQfQUsQUiQfQd", OP_MUL_LN>;
 def VMULL_LANEQ : SOpInst<"vmull_laneq", "wdki", "siUsUi", OP_MULL_LN>;
 def VMULL_HIGH_LANE : SOpInst<"vmull_high_lane", "wkdi", "siUsUi",
                               OP_MULLHi_LN>;
@@ -763,8 +771,10 @@ def VQDMULL_HIGH_LANEQ : SOpInst<"vqdmull_high_laneq", "wkki", "si",
 def VQDMULH_LANEQ  : SOpInst<"vqdmulh_laneq", "ddji", "siQsQi", OP_QDMULH_LN>;
 def VQRDMULH_LANEQ : SOpInst<"vqrdmulh_laneq", "ddji", "siQsQi", OP_QRDMULH_LN>;
 
-def VMULX_LANE : IOpInst<"vmulx_lane", "ddgi", "fdQfQd", OP_MULX_LN>;
-def VMULX_LANEQ : IOpInst<"vmulx_laneq", "ddji", "fdQfQd", OP_MULX_LN>;
+// Note: d type implemented by SCALAR_VMULX_LANE
+def VMULX_LANE : IOpInst<"vmulx_lane", "ddgi", "fQfQd", OP_MULX_LN>;
+// Note: d type is implemented by SCALAR_VMULX_LANEQ
+def VMULX_LANEQ : IOpInst<"vmulx_laneq", "ddji", "fQfQd", OP_MULX_LN>;
 
 ////////////////////////////////////////////////////////////////////////////////
 // Across vectors class
@@ -1028,4 +1038,36 @@ def SCALAR_SQXTN : SInst<"vqmovn", "zs", "SsSiSl">;
 ////////////////////////////////////////////////////////////////////////////////
 // Scalar Unsigned Saturating Extract Narrow
 def SCALAR_UQXTN : SInst<"vqmovn", "zs", "SUsSUiSUl">;
+
+// Scalar Floating Point multiply (scalar, by element)
+def SCALAR_FMUL_LANE : IOpInst<"vmul_lane", "ssdi", "SfSd", OP_SCALAR_MUL_LN>;
+def SCALAR_FMUL_LANEQ : IOpInst<"vmul_laneq", "ssji", "SfSd", OP_SCALAR_MUL_LNQ>;
+
+// Scalar Floating Point multiply extended (scalar, by element)
+def SCALAR_FMULX_LANE : IOpInst<"vmulx_lane", "ssdi", "SfSd", OP_SCALAR_MULX_LN>;
+def SCALAR_FMULX_LANEQ : IOpInst<"vmulx_laneq", "ssji", "SfSd", OP_SCALAR_MULX_LNQ>;
+
+def SCALAR_VMUL_N : IInst<"vmul_n", "dds", "d">;
+
+// VMUL_LANE_A64 d type implemented using scalar mul lane
+def SCALAR_VMUL_LANE : IInst<"vmul_lane", "ddgi", "d">;
+
+// VMUL_LANEQ d type implemented using scalar mul lane
+def SCALAR_VMUL_LANEQ : IInst<"vmul_laneq", "ddji", "d">;
+
+// VMULX_LANE d type implemented using scalar vmulx_lane
+def SCALAR_VMULX_LANE : IOpInst<"vmulx_lane", "ddgi", "d", OP_SCALAR_VMULX_LN>;
+
+// VMULX_LANEQ d type implemented using scalar vmulx_laneq
+def SCALAR_VMULX_LANEQ : IOpInst<"vmulx_laneq", "ddji", "d", OP_SCALAR_VMULX_LNQ>;
+
+// Scalar Floating Point fused multiply-add (scalar, by element)
+def SCALAR_FMLA_LANE : IInst<"vfma_lane", "sssdi", "SfSd">;
+def SCALAR_FMLA_LANEQ : IInst<"vfma_laneq", "sssji", "SfSd">;
+
+// Scalar Floating Point fused multiply-subtract (scalar, by element)
+def SCALAR_FMLS_LANE : IOpInst<"vfms_lane", "sssdi", "SfSd", OP_FMS_LN>;
+def SCALAR_FMLS_LANEQ : IOpInst<"vfms_laneq", "sssji", "SfSd", OP_FMS_LNQ>;
 }
+
+
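For context (not part of the patch): judging from the OpScalarMulLane emitter
string added to utils/TableGen/NeonEmitter.cpp further down, SCALAR_FMUL_LANE
is expected to expand in the generated arm_neon.h to roughly the following
(approximate spelling; the emitter wraps this in its usual macro and
statement-expression boilerplate):

    #define vmuld_lane_f64(__a, __b, __c) __extension__ ({ \
      float64_t __d1 = vget_lane_f64(__b, __c);            \
      __a * __d1; })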
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 5ced543..e8ba292 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -1770,6 +1770,37 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
   // argument that specifies the vector type, need to handle each case.
   switch (BuiltinID) {
   default: break;
+  case AArch64::BI__builtin_neon_vfmas_lane_f32:
+  case AArch64::BI__builtin_neon_vfmas_laneq_f32:
+  case AArch64::BI__builtin_neon_vfmad_lane_f64:
+  case AArch64::BI__builtin_neon_vfmad_laneq_f64: {
+    bool Quad = false;
+    if (BuiltinID == AArch64::BI__builtin_neon_vfmas_laneq_f32 ||
+        BuiltinID == AArch64::BI__builtin_neon_vfmad_laneq_f64)
+      Quad = true;
+    llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
+    Value *F = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
+    // extract lane acc += x * v[i]
+    Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
+    return CGF.Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
+  }
+  // Scalar Floating-point Multiply Extended
+  case AArch64::BI__builtin_neon_vmulxs_f32:
+  case AArch64::BI__builtin_neon_vmulxd_f64: {
+    Int = Intrinsic::aarch64_neon_vmulx;
+    llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
+    return CGF.EmitNeonCall(CGF.CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
+  }
+  case AArch64::BI__builtin_neon_vmul_n_f64: {
+    // v1f64 vmul_n_f64 should be mapped to Neon scalar mul lane
+    llvm::Type *VTy = GetNeonType(&CGF,
+      NeonTypeFlags(NeonTypeFlags::Float64, false, false));
+    Ops[0] = CGF.Builder.CreateBitCast(Ops[0], VTy);
+    llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
+    Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], Idx, "extract");
+    Value *Result = CGF.Builder.CreateFMul(Ops[0], Ops[1]);
+    return CGF.Builder.CreateBitCast(Result, VTy);
+  }
   case AArch64::BI__builtin_neon_vget_lane_i8:
   case AArch64::BI__builtin_neon_vget_lane_i16:
   case AArch64::BI__builtin_neon_vget_lane_i32:
@@ -2004,11 +2035,6 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
   case AArch64::BI__builtin_neon_vqrdmulhs_s32:
     Int = Intrinsic::arm_neon_vqrdmulh;
     s = "vqrdmulh"; OverloadInt = true; break;
-  // Scalar Floating-point Multiply Extended
-  case AArch64::BI__builtin_neon_vmulxs_f32:
-  case AArch64::BI__builtin_neon_vmulxd_f64:
-    Int = Intrinsic::aarch64_neon_vmulx;
-    s = "vmulx"; OverloadInt = true; break;
   // Scalar Floating-point Reciprocal Step and
   case AArch64::BI__builtin_neon_vrecpss_f32:
   case AArch64::BI__builtin_neon_vrecpsd_f64:
@@ -2839,9 +2865,22 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
   case AArch64::BI__builtin_neon_vsha256su1q_v:
     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256su1, Ty),
                         Ops, "sha256su1");
+  case AArch64::BI__builtin_neon_vmul_lane_v:
+  case AArch64::BI__builtin_neon_vmul_laneq_v: {
+    // v1f64 vmul_lane should be mapped to Neon scalar mul lane
+    bool Quad = false;
+    if (BuiltinID == AArch64::BI__builtin_neon_vmul_laneq_v)
+      Quad = true;
+    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
+    llvm::Type *VTy = GetNeonType(this,
+      NeonTypeFlags(NeonTypeFlags::Float64, false, Quad ? true : false));
+    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
+    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
+    Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
+    return Builder.CreateBitCast(Result, Ty);
+  }
   // AArch64-only builtins
-  case AArch64::BI__builtin_neon_vfma_lane_v:
   case AArch64::BI__builtin_neon_vfmaq_laneq_v: {
     Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -2866,12 +2905,46 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
     return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
   }
-  case AArch64::BI__builtin_neon_vfma_laneq_v: {
+  case AArch64::BI__builtin_neon_vfma_lane_v: {
+    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+    // v1f64 fma should be mapped to Neon scalar f64 fma
+    if (VTy && VTy->getElementType() == DoubleTy) {
+      Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
+      Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
+      llvm::Type *VTy = GetNeonType(this,
+        NeonTypeFlags(NeonTypeFlags::Float64, false, false));
+      Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
+      Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
+      Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
+      Value *Result = Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
+      return Builder.CreateBitCast(Result, Ty);
+    }
     Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+    Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
+    return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
+  }
+  case AArch64::BI__builtin_neon_vfma_laneq_v: {
     llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+    // v1f64 fma should be mapped to Neon scalar f64 fma
+    if (VTy && VTy->getElementType() == DoubleTy) {
+      Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
+      Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
+      llvm::Type *VTy = GetNeonType(this,
+        NeonTypeFlags(NeonTypeFlags::Float64, false, true));
+      Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
+      Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
+      Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
+      Value *Result = Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
+      return Builder.CreateBitCast(Result, Ty);
+    }
+    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+
     llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
                                             VTy->getNumElements() * 2);
     Ops[2] = Builder.CreateBitCast(Ops[2], STy);
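For context (not part of the patch): at the C level, the new v1f64 paths above
reduce the vector forms to scalar arithmetic. A hypothetical helper showing
what vfma_lane_f64(a, b, v, 0) computes; fma() from math.h stands in for the
llvm.fma.f64 call emitted by CreateCall3, and the function name is made up:

    #include <arm_neon.h>
    #include <math.h>

    static float64x1_t vfma_lane_f64_equiv(float64x1_t a, float64x1_t b,
                                           float64x1_t v) {
      float64_t acc = vget_lane_f64(a, 0);  // Ops[0], the accumulator
      float64_t x   = vget_lane_f64(b, 0);  // Ops[1]
      float64_t e   = vget_lane_f64(v, 0);  // extracted lane (Ops[2], Ops[3])
      // CreateCall3(F, Ops[1], Ops[2], Ops[0]) computes x * e + acc
      return vset_lane_f64(fma(x, e, acc), a, 0);
    }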
diff --git a/test/CodeGen/aarch64-neon-2velem.c b/test/CodeGen/aarch64-neon-2velem.c
index f34e11a..2a1eae4 100644
--- a/test/CodeGen/aarch64-neon-2velem.c
+++ b/test/CodeGen/aarch64-neon-2velem.c
@@ -722,6 +722,14 @@ float32x2_t test_vmul_lane_f32(float32x2_t a, float32x2_t v) {
   // CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
 }
 
+
+float64x1_t test_vmul_lane_f64(float64x1_t a, float64x1_t v) {
+  // CHECK: test_vmul_lane_f64
+  return vmul_lane_f64(a, v, 0);
+  // CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+}
+
+
 float32x4_t test_vmulq_lane_f32(float32x4_t a, float32x2_t v) {
   // CHECK: test_vmulq_lane_f32
   return vmulq_lane_f32(a, v, 1);
@@ -740,6 +748,13 @@ float32x2_t test_vmul_laneq_f32(float32x2_t a, float32x4_t v) {
   // CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
 }
 
+float64x1_t test_vmul_laneq_f64_0(float64x1_t a, float64x2_t v) {
+  // CHECK: test_vmul_laneq_f64_0
+  return vmul_laneq_f64(a, v, 0);
+  // CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+}
+
+
 float32x4_t test_vmulq_laneq_f32(float32x4_t a, float32x4_t v) {
   // CHECK: test_vmulq_laneq_f32
   return vmulq_laneq_f32(a, v, 1);
diff --git a/test/CodeGen/aarch64-neon-scalar-x-indexed-elem.c b/test/CodeGen/aarch64-neon-scalar-x-indexed-elem.c
new file mode 100644
index 0000000..9dc9b72
--- /dev/null
+++ b/test/CodeGen/aarch64-neon-scalar-x-indexed-elem.c
@@ -0,0 +1,130 @@
+// REQUIRES: aarch64-registered-target
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
+// RUN:   -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
+
+// Test new aarch64 intrinsics and types
+
+#include <arm_neon.h>
+
+float32_t test_vmuls_lane_f32(float32_t a, float32x2_t b) {
+  // CHECK: test_vmuls_lane_f32
+  return vmuls_lane_f32(a, b, 1);
+  // CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+}
+
+float64_t test_vmuld_lane_f64(float64_t a, float64x1_t b) {
+  // CHECK: test_vmuld_lane_f64
+  return vmuld_lane_f64(a, b, 0);
+  // CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+}
+
+float32_t test_vmuls_laneq_f32(float32_t a, float32x4_t b) {
+  // CHECK: test_vmuls_laneq_f32
+  return vmuls_laneq_f32(a, b, 3);
+  // CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+}
+
+float64_t test_vmuld_laneq_f64(float64_t a, float64x2_t b) {
+  // CHECK: test_vmuld_laneq_f64
+  return vmuld_laneq_f64(a, b, 1);
+  // CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+}
+
+float64x1_t test_vmul_n_f64(float64x1_t a, float64_t b) {
+  // CHECK: test_vmul_n_f64
+  return vmul_n_f64(a, b);
+  // CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+}
+
+float32_t test_vmulxs_lane_f32(float32_t a, float32x2_t b) {
+// CHECK: test_vmulxs_lane_f32
+  return vmulxs_lane_f32(a, b, 1);
+// CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+}
+
+float32_t test_vmulxs_laneq_f32(float32_t a, float32x4_t b) {
+// CHECK: test_vmulxs_laneq_f32
+  return vmulxs_laneq_f32(a, b, 3);
+// CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+}
+
+float64_t test_vmulxd_lane_f64(float64_t a, float64x1_t b) {
+// CHECK: test_vmulxd_lane_f64
+  return vmulxd_lane_f64(a, b, 0);
+// CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+}
+
+float64_t test_vmulxd_laneq_f64(float64_t a, float64x2_t b) {
+// CHECK: test_vmulxd_laneq_f64
+  return vmulxd_laneq_f64(a, b, 1);
+// CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+}
+
+// CHECK_AARCH64: test_vmulx_lane_f64
+float64x1_t test_vmulx_lane_f64(float64x1_t a, float64x1_t b) {
+  return vmulx_lane_f64(a, b, 0);
+  // CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+}
+
+
+// CHECK_AARCH64: test_vmulx_laneq_f64_0
+float64x1_t test_vmulx_laneq_f64_0(float64x1_t a, float64x2_t b) {
+  return vmulx_laneq_f64(a, b, 0);
+  // CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+}
+
+// CHECK_AARCH64: test_vmulx_laneq_f64_1
+float64x1_t test_vmulx_laneq_f64_1(float64x1_t a, float64x2_t b) {
+  return vmulx_laneq_f64(a, b, 1);
+  // CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+}
+
+
+// CHECK_AARCH64: test_vfmas_lane_f32
+float32_t test_vfmas_lane_f32(float32_t a, float32_t b, float32x2_t c) {
+  return vfmas_lane_f32(a, b, c, 1);
+  // CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+}
+
+// CHECK_AARCH64: test_vfmad_lane_f64
+float64_t test_vfmad_lane_f64(float64_t a, float64_t b, float64x1_t c) {
+  return vfmad_lane_f64(a, b, c, 0);
+  // CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+}
+
+// CHECK_AARCH64: test_vfmad_laneq_f64
+float64_t test_vfmad_laneq_f64(float64_t a, float64_t b, float64x2_t c) {
+  return vfmad_laneq_f64(a, b, c, 1);
+  // CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+}
+
+// CHECK_AARCH64: test_vfmss_lane_f32
+float32_t test_vfmss_lane_f32(float32_t a, float32_t b, float32x2_t c) {
+  return vfmss_lane_f32(a, b, c, 1);
+  // CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+}
+
+// CHECK_AARCH64: test_vfma_lane_f64
+float64x1_t test_vfma_lane_f64(float64x1_t a, float64x1_t b, float64x1_t v) {
+  return vfma_lane_f64(a, b, v, 0);
+  // CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+}
+
+// CHECK_AARCH64: test_vfms_lane_f64
+float64x1_t test_vfms_lane_f64(float64x1_t a, float64x1_t b, float64x1_t v) {
+  return vfms_lane_f64(a, b, v, 0);
+  // CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+}
+
+// CHECK_AARCH64: test_vfma_laneq_f64
+float64x1_t test_vfma_laneq_f64(float64x1_t a, float64x1_t b, float64x2_t v) {
+  return vfma_laneq_f64(a, b, v, 0);
+  // CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+}
+
+// CHECK_AARCH64: test_vfms_laneq_f64
+float64x1_t test_vfms_laneq_f64(float64x1_t a, float64x1_t b, float64x2_t v) {
+  return vfms_laneq_f64(a, b, v, 0);
+  // CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+}
+
diff --git a/utils/TableGen/NeonEmitter.cpp b/utils/TableGen/NeonEmitter.cpp
index 4fd94a9..d0e0f59 100644
--- a/utils/TableGen/NeonEmitter.cpp
+++ b/utils/TableGen/NeonEmitter.cpp
@@ -128,7 +128,13 @@ enum OpKind {
   OpMovlHi,
   OpCopyLane,
   OpCopyQLane,
-  OpCopyLaneQ
+  OpCopyLaneQ,
+  OpScalarMulLane,
+  OpScalarMulLaneQ,
+  OpScalarMulXLane,
+  OpScalarMulXLaneQ,
+  OpScalarVMulXLane,
+  OpScalarVMulXLaneQ
 };
 
 enum ClassKind {
@@ -282,6 +288,12 @@ public:
     OpMap["OP_COPY_LN"] = OpCopyLane;
    OpMap["OP_COPYQ_LN"] = OpCopyQLane;
    OpMap["OP_COPY_LNQ"] = OpCopyLaneQ;
+    OpMap["OP_SCALAR_MUL_LN"]= OpScalarMulLane;
+    OpMap["OP_SCALAR_MUL_LNQ"]= OpScalarMulLaneQ;
+    OpMap["OP_SCALAR_MULX_LN"]= OpScalarMulXLane;
+    OpMap["OP_SCALAR_MULX_LNQ"]= OpScalarMulXLaneQ;
+    OpMap["OP_SCALAR_VMULX_LN"]= OpScalarVMulXLane;
+    OpMap["OP_SCALAR_VMULX_LNQ"]= OpScalarVMulXLaneQ;
 
     Record *SI = R.getClass("SInst");
     Record *II = R.getClass("IInst");
@@ -1928,6 +1940,77 @@ static std::string GenOpString(const std::string &name, OpKind op,
         "(__c1, __d1); \\\n vset_lane_" + typeCode + "(__c2, __a1, __b1);";
     break;
   }
+  case OpScalarMulLane: {
+    std::string typeCode = "";
+    InstructionTypeCode(typestr, ClassS, quad, typeCode);
+    s += TypeString('s', typestr) + " __d1 = vget_lane_" + typeCode +
+      "(__b, __c);\\\n __a * __d1;";
+    break;
+  }
+  case OpScalarMulLaneQ: {
+    std::string typeCode = "";
+    InstructionTypeCode(typestr, ClassS, quad, typeCode);
+    s += TypeString('s', typestr) + " __d1 = vgetq_lane_" + typeCode +
+      "(__b, __c);\\\n __a * __d1;";
+    break;
+  }
+  case OpScalarMulXLane: {
+    bool dummy = false;
+    char type = ClassifyType(typestr, dummy, dummy, dummy);
+    if (type == 'f') type = 's';
+    std::string typeCode = "";
+    InstructionTypeCode(typestr, ClassS, quad, typeCode);
+    s += TypeString('s', typestr) + " __d1 = vget_lane_" + typeCode +
+      "(__b, __c);\\\n vmulx" + type + "_" +
+      typeCode + "(__a, __d1);";
+    break;
+  }
+  case OpScalarMulXLaneQ: {
+    bool dummy = false;
+    char type = ClassifyType(typestr, dummy, dummy, dummy);
+    if (type == 'f') type = 's';
+    std::string typeCode = "";
+    InstructionTypeCode(typestr, ClassS, quad, typeCode);
+    s += TypeString('s', typestr) + " __d1 = vgetq_lane_" +
+      typeCode + "(__b, __c);\\\n vmulx" + type +
+      "_" + typeCode + "(__a, __d1);";
+    break;
+  }
+
+  case OpScalarVMulXLane: {
+    bool dummy = false;
+    char type = ClassifyType(typestr, dummy, dummy, dummy);
+    if (type == 'f') type = 's';
+    std::string typeCode = "";
+    InstructionTypeCode(typestr, ClassS, quad, typeCode);
+    s += TypeString('s', typestr) + " __d1 = vget_lane_" +
+      typeCode + "(__a, 0);\\\n" +
+      "  " + TypeString('s', typestr) + " __e1 = vget_lane_" +
+      typeCode + "(__b, __c);\\\n" +
+      "  " + TypeString('s', typestr) + " __f1 = vmulx" + type + "_" +
+      typeCode + "(__d1, __e1);\\\n" +
+      "  " + TypeString('d', typestr) + " __g1;\\\n" +
+      "  vset_lane_" + typeCode + "(__f1, __g1, __c);";
+    break;
+  }
+
+  case OpScalarVMulXLaneQ: {
+    bool dummy = false;
+    char type = ClassifyType(typestr, dummy, dummy, dummy);
+    if (type == 'f') type = 's';
+    std::string typeCode = "";
+    InstructionTypeCode(typestr, ClassS, quad, typeCode);
+    s += TypeString('s', typestr) + " __d1 = vget_lane_" +
+      typeCode + "(__a, 0);\\\n" +
+      "  " + TypeString('s', typestr) + " __e1 = vgetq_lane_" +
+      typeCode + "(__b, __c);\\\n" +
+      "  " + TypeString('s', typestr) + " __f1 = vmulx" + type + "_" +
+      typeCode + "(__d1, __e1);\\\n" +
+      "  " + TypeString('d', typestr) + " __g1;\\\n" +
+      "  vset_lane_" + typeCode + "(__f1, __g1, 0);";
+    break;
+  }
+
   default: PrintFatalError("unknown OpKind!");
   }
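For context (not part of the patch): reading the OpScalarVMulXLane format
string above, vmulx_lane_f64(__a, __b, __c) is expected to expand in the
generated arm_neon.h to roughly the following body (approximate spelling; __a
and __b are float64x1_t values and __c is the constant lane index):

    float64_t   __d1 = vget_lane_f64(__a, 0);   // lane 0 of the v1f64 input
    float64_t   __e1 = vget_lane_f64(__b, __c); // selected lane of the operand
    float64_t   __f1 = vmulxd_f64(__d1, __e1);  // scalar FMULX
    float64x1_t __g1;
    vset_lane_f64(__f1, __g1, __c);             // repack the scalar result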
@@ -2876,8 +2959,7 @@ static std::string GenTest(const std::string &name,
                            StringRef outTypeStr, StringRef inTypeStr,
                            bool isShift, bool isHiddenLOp, ClassKind ck,
                            const std::string &InstName,
-                           bool isA64,
-                           std::string & testFuncProto) {
+                           bool isA64, std::string & testFuncProto) {
   assert(!proto.empty() && "");
   std::string s;
@@ -2896,9 +2978,9 @@ static std::string GenTest(const std::string &name,
   // for aarch64 instructions yet
   std::vector<std::string> FileCheckPatterns;
   if (!isA64) {
-   GenerateChecksForIntrinsic(name, proto, outTypeStr, inTypeStr, ck, InstName,
-                              isHiddenLOp, FileCheckPatterns);
-   s+= "// CHECK_ARM: test_" + mangledName + "\n";
+    GenerateChecksForIntrinsic(name, proto, outTypeStr, inTypeStr, ck, InstName,
+                               isHiddenLOp, FileCheckPatterns);
+    s+= "// CHECK_ARM: test_" + mangledName + "\n";
   }
   s += "// CHECK_AARCH64: test_" + mangledName + "\n";
@@ -2959,7 +3041,7 @@ static std::string GenTest(const std::string &name,
 void NeonEmitter::genTargetTest(raw_ostream &OS, StringMap<OpKind> &EmittedMap,
                                 bool isA64GenTest) {
   if (isA64GenTest)
-   OS << "#ifdef __aarch64__\n";
+    OS << "#ifdef __aarch64__\n";
 
   std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
   for (unsigned i = 0, e = RV.size(); i != e; ++i) {
@@ -2995,17 +3077,17 @@ void NeonEmitter::genTargetTest(raw_ostream &OS, StringMap<OpKind> &EmittedMap,
           (void)ClassifyType(TypeVec[srcti], inQuad, dummy, dummy);
           if (srcti == ti || inQuad != outQuad)
             continue;
-         std::string testFuncProto;
+          std::string testFuncProto;
           std::string s = GenTest(name, Proto, TypeVec[ti], TypeVec[srcti],
                                   isShift, isHiddenLOp, ck, InstName, isA64,
-                                 testFuncProto);
+                                  testFuncProto);
           if (EmittedMap.count(testFuncProto))
             continue;
           EmittedMap[testFuncProto] = kind;
           OS << s << "\n";
         }
       } else {
-       std::string testFuncProto;
+        std::string testFuncProto;
         std::string s = GenTest(name, Proto, TypeVec[ti], TypeVec[ti], isShift,
                                 isHiddenLOp, ck, InstName, isA64, testFuncProto);
         if (EmittedMap.count(testFuncProto))
@@ -3017,7 +3099,7 @@ void NeonEmitter::genTargetTest(raw_ostream &OS, StringMap<OpKind> &EmittedMap,
   }
 
   if (isA64GenTest)
-   OS << "#endif\n";
+    OS << "#endif\n";
 }
 
 /// runTests - Write out a complete set of tests for all of the Neon
 /// intrinsics.
@@ -3026,10 +3108,10 @@ void NeonEmitter::runTests(raw_ostream &OS) {
     "apcs-gnu\\\n"
     "// RUN: -target-cpu swift -ffreestanding -Os -S -o - %s\\\n"
     "// RUN: | FileCheck %s -check-prefix=CHECK_ARM\n"
-   "\n"
-   "// RUN: %clang_cc1 -triple aarch64-none-linux-gnu \\\n"
-   "// RUN -target-feature +neon -ffreestanding -S -o - %s \\\n"
-   "// RUN: | FileCheck %s -check-prefix=CHECK_AARCH64\n"
+    "\n"
+    "// RUN: %clang_cc1 -triple aarch64-none-linux-gnu \\\n"
+    "// RUN -target-feature +neon -ffreestanding -S -o - %s \\\n"
+    "// RUN: | FileCheck %s -check-prefix=CHECK_AARCH64\n"
     "\n"
     "// REQUIRES: long_tests\n"
     "\n"