[Mlir-commits] [mlir] 83003db - [mlir] [LLVMIR] add all vector reduction intrinsics to LLVM IR dialect
llvmlistbot at llvm.org
Mon Feb 10 09:19:35 PST 2020
Author: aartbik
Date: 2020-02-10T09:19:05-08:00
New Revision: 83003db430b90cb522a5c8fd147a59acb011d677
URL: https://github.com/llvm/llvm-project/commit/83003db430b90cb522a5c8fd147a59acb011d677
DIFF: https://github.com/llvm/llvm-project/commit/83003db430b90cb522a5c8fd147a59acb011d677.diff
LOG: [mlir] [LLVMIR] add all vector reduction intrinsics to LLVM IR dialect
Summary:
This allows lowering VectorOps (and other ops) into LLVM IR
that maps directly to efficient implementations on target machines.
http://llvm.org/docs/LangRef.html#experimental-vector-reduction-intrinsics
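For illustration, a minimal sketch of how one of the new ops is used in the
LLVM dialect (the function name @sum is hypothetical; op and type syntax
follow the test updates below, and the translated LLVM IR would call
@llvm.experimental.vector.reduce.add.v8i32):

  llvm.func @sum(%v: !llvm<"<8 x i32>">) -> !llvm.i32 {
    // Integer add reduction over a <8 x i32> vector operand.
    %r = "llvm.intr.experimental.vector.reduce.add"(%v)
        : (!llvm<"<8 x i32>">) -> !llvm.i32
    llvm.return %r : !llvm.i32
  }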
Reviewers: ftynse, andydavis1, nicolasvasilache, rriddle
Reviewed By: ftynse, rriddle
Subscribers: jfb, mehdi_amini, rriddle, jpienaar, burmako, shauheen, antiagainst, nicolasvasilache, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D74171
Added:
Modified:
mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
mlir/test/Target/llvmir-intrinsics.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
index 7375a9cddc32..a58d9af0666a 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
@@ -96,4 +96,42 @@ class LLVM_EnumAttr<string name, string llvmName, string description,
string llvmClassName = llvmName;
}
+// LLVM vector reduction over a single vector.
+class LLVM_VectorReduction<string mnem> :
+ LLVM_IntrOp<"experimental.vector.reduce." # mnem, []>,
+ Arguments<(ins LLVM_Type)>, Results<(outs LLVM_Type:$res)> {
+ let llvmBuilder = [{
+ llvm::Module *module = builder.GetInsertBlock()->getModule();
+ llvm::Function *fn = llvm::Intrinsic::getDeclaration(
+ module,
+ llvm::Intrinsic::experimental_vector_reduce_}] #
+ !subst(".","_", mnem) # [{, {
+ opInst.getOperand(0).getType().cast<LLVM::LLVMType>()
+ .getUnderlyingType(),
+ });
+ auto operands = lookupValues(opInst.getOperands());
+ $res = builder.CreateCall(fn, operands);
+ }];
+}
+
+// LLVM vector reduction over a single vector, with an initial value.
+class LLVM_VectorReductionV2<string mnem> :
+ LLVM_IntrOp<"experimental.vector.reduce.v2." # mnem, []>,
+ Arguments<(ins LLVM_Type, LLVM_Type)>, Results<(outs LLVM_Type:$res)> {
+ let llvmBuilder = [{
+ llvm::Module *module = builder.GetInsertBlock()->getModule();
+ llvm::Function *fn = llvm::Intrinsic::getDeclaration(
+ module,
+ llvm::Intrinsic::experimental_vector_reduce_v2_}] #
+ !subst(".","_", mnem) # [{, {
+ opInst.getResult(0).getType().cast<LLVM::LLVMType>()
+ .getUnderlyingType(),
+ opInst.getOperand(1).getType().cast<LLVM::LLVMType>()
+ .getUnderlyingType(),
+ });
+ auto operands = lookupValues(opInst.getOperands());
+ $res = builder.CreateCall(fn, operands);
+ }];
+}
+
#endif // LLVMIR_OP_BASE
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
index 72daab5dc194..ebba7a4a7aeb 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -780,6 +780,29 @@ def LLVM_Prefetch : LLVM_ZeroResultOp<"intr.prefetch">,
}];
}
+//
+// Vector Reductions.
+//
+
+def LLVM_experimental_vector_reduce_add : LLVM_VectorReduction<"add">;
+def LLVM_experimental_vector_reduce_and : LLVM_VectorReduction<"and">;
+def LLVM_experimental_vector_reduce_mul : LLVM_VectorReduction<"mul">;
+def LLVM_experimental_vector_reduce_fmax : LLVM_VectorReduction<"fmax">;
+def LLVM_experimental_vector_reduce_fmin : LLVM_VectorReduction<"fmin">;
+def LLVM_experimental_vector_reduce_or : LLVM_VectorReduction<"or">;
+def LLVM_experimental_vector_reduce_smax : LLVM_VectorReduction<"smax">;
+def LLVM_experimental_vector_reduce_smin : LLVM_VectorReduction<"smin">;
+def LLVM_experimental_vector_reduce_umax : LLVM_VectorReduction<"umax">;
+def LLVM_experimental_vector_reduce_umin : LLVM_VectorReduction<"umin">;
+def LLVM_experimental_vector_reduce_xor : LLVM_VectorReduction<"xor">;
+
+def LLVM_experimental_vector_reduce_v2_fadd : LLVM_VectorReductionV2<"fadd">;
+def LLVM_experimental_vector_reduce_v2_fmul : LLVM_VectorReductionV2<"fmul">;
+
+//
+// Atomic operations.
+//
+
def AtomicBinOpXchg : I64EnumAttrCase<"xchg", 0>;
def AtomicBinOpAdd : I64EnumAttrCase<"add", 1>;
def AtomicBinOpSub : I64EnumAttrCase<"sub", 2>;
diff --git a/mlir/test/Target/llvmir-intrinsics.mlir b/mlir/test/Target/llvmir-intrinsics.mlir
index fcc110215229..9866a84efc9c 100644
--- a/mlir/test/Target/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/llvmir-intrinsics.mlir
@@ -99,6 +99,37 @@ llvm.func @copysign_test(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm<"<
llvm.return
}
+// CHECK-LABEL: @vector_reductions
+llvm.func @vector_reductions(%arg0: !llvm.float, %arg1: !llvm<"<8 x float>">, %arg2: !llvm<"<8 x i32>">) {
+ // CHECK: call i32 @llvm.experimental.vector.reduce.add.v8i32
+ "llvm.intr.experimental.vector.reduce.add"(%arg2) : (!llvm<"<8 x i32>">) -> !llvm.i32
+ // CHECK: call i32 @llvm.experimental.vector.reduce.and.v8i32
+ "llvm.intr.experimental.vector.reduce.and"(%arg2) : (!llvm<"<8 x i32>">) -> !llvm.i32
+ // CHECK: call float @llvm.experimental.vector.reduce.fmax.v8f32
+ "llvm.intr.experimental.vector.reduce.fmax"(%arg1) : (!llvm<"<8 x float>">) -> !llvm.float
+ // CHECK: call float @llvm.experimental.vector.reduce.fmin.v8f32
+ "llvm.intr.experimental.vector.reduce.fmin"(%arg1) : (!llvm<"<8 x float>">) -> !llvm.float
+ // CHECK: call i32 @llvm.experimental.vector.reduce.mul.v8i32
+ "llvm.intr.experimental.vector.reduce.mul"(%arg2) : (!llvm<"<8 x i32>">) -> !llvm.i32
+ // CHECK: call i32 @llvm.experimental.vector.reduce.or.v8i32
+ "llvm.intr.experimental.vector.reduce.or"(%arg2) : (!llvm<"<8 x i32>">) -> !llvm.i32
+ // CHECK: call i32 @llvm.experimental.vector.reduce.smax.v8i32
+ "llvm.intr.experimental.vector.reduce.smax"(%arg2) : (!llvm<"<8 x i32>">) -> !llvm.i32
+ // CHECK: call i32 @llvm.experimental.vector.reduce.smin.v8i32
+ "llvm.intr.experimental.vector.reduce.smin"(%arg2) : (!llvm<"<8 x i32>">) -> !llvm.i32
+ // CHECK: call i32 @llvm.experimental.vector.reduce.umax.v8i32
+ "llvm.intr.experimental.vector.reduce.umax"(%arg2) : (!llvm<"<8 x i32>">) -> !llvm.i32
+ // CHECK: call i32 @llvm.experimental.vector.reduce.umin.v8i32
+ "llvm.intr.experimental.vector.reduce.umin"(%arg2) : (!llvm<"<8 x i32>">) -> !llvm.i32
+ // CHECK: call float @llvm.experimental.vector.reduce.v2.fadd.f32.v8f32
+ "llvm.intr.experimental.vector.reduce.v2.fadd"(%arg0, %arg1) : (!llvm.float, !llvm<"<8 x float>">) -> !llvm.float
+ // CHECK: call float @llvm.experimental.vector.reduce.v2.fmul.f32.v8f32
+ "llvm.intr.experimental.vector.reduce.v2.fmul"(%arg0, %arg1) : (!llvm.float, !llvm<"<8 x float>">) -> !llvm.float
+ // CHECK: call i32 @llvm.experimental.vector.reduce.xor.v8i32
+ "llvm.intr.experimental.vector.reduce.xor"(%arg2) : (!llvm<"<8 x i32>">) -> !llvm.i32
+ llvm.return
+}
+
// Check that intrinsics are declared with appropriate types.
// CHECK-DAG: declare float @llvm.fma.f32(float, float, float)
// CHECK-DAG: declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>) #0