[Mlir-commits] [mlir] [MLIR][NVVM] Fix undef in cp.async.bulk.tensor.reduce Op (PR #157423)
Durgadoss R
llvmlistbot at llvm.org
Mon Sep 15 07:02:05 PDT 2025
================
@@ -1634,53 +1634,124 @@ CpAsyncBulkTensorSharedCTAToGlobalOp::getIntrinsicIDAndArgs(
return {id, std::move(args)};
}
-#define CP_ASYNC_BULK_TENSOR_REDUCE_MODE(op, dim, mode) \
- llvm::Intrinsic::nvvm_cp_async_bulk_tensor_##op##_##mode##_##dim##d
+NVVM::IDArgPair CpAsyncBulkTensorReduceOp::getIntrinsicIDAndArgs(
+ Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
+ auto thisOp = cast<NVVM::CpAsyncBulkTensorReduceOp>(op);
+ llvm::LLVMContext &ctx = mt.getLLVMContext();
-#define CP_ASYNC_BULK_TENSOR_REDUCE(op, dim, is_im2col) \
- is_im2col ? CP_ASYNC_BULK_TENSOR_REDUCE_MODE(op, dim, im2col) \
- : CP_ASYNC_BULK_TENSOR_REDUCE_MODE(op, dim, tile)
+ llvm::SmallVector<llvm::Value *> args;
-#define GET_CP_ASYNC_BULK_TENSOR_ID(op, dims, is_im2col) \
- [&]() -> auto { \
- switch (dims) { \
- case 1: \
- return CP_ASYNC_BULK_TENSOR_REDUCE_MODE(op, 1, tile); \
- case 2: \
- return CP_ASYNC_BULK_TENSOR_REDUCE_MODE(op, 2, tile); \
- case 3: \
- return CP_ASYNC_BULK_TENSOR_REDUCE(op, 3, is_im2col); \
- case 4: \
- return CP_ASYNC_BULK_TENSOR_REDUCE(op, 4, is_im2col); \
- case 5: \
- return CP_ASYNC_BULK_TENSOR_REDUCE(op, 5, is_im2col); \
- default: \
- llvm_unreachable("Invalid TensorDim in CpAsyncBulkTensorReduceOp."); \
- } \
- }()
+  // Arguments to the intrinsic:
+  // shared_mem_ptr, tmaDesc, tensorDims,
+  // cache_hint (if applicable) and flag (boolean)
+ args.push_back(mt.lookupValue(thisOp.getSrcMem()));
+ args.push_back(mt.lookupValue(thisOp.getTmaDescriptor()));
+
+ for (Value v : thisOp.getCoordinates())
+ args.push_back(mt.lookupValue(v));
+
+ mlir::Value cacheHint = thisOp.getL2CacheHint();
+ const bool hasCacheHint = static_cast<bool>(cacheHint);
+ llvm::Value *i64ZeroValue =
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(ctx), 0);
+ args.push_back(hasCacheHint ? mt.lookupValue(cacheHint) : i64ZeroValue);
+ args.push_back(builder.getInt1(hasCacheHint));
+
+ const unsigned NI = llvm::Intrinsic::not_intrinsic;
+ static constexpr llvm::Intrinsic::ID IDTable[][2][6] = {
+ // RedTy::ADD
+ {{NI, llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_tile_1d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_tile_2d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_tile_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_tile_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_tile_5d},
+ {NI, NI, NI,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_im2col_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_im2col_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_im2col_5d}},
+ // RedTy::MIN
+ {{NI, llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_tile_1d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_tile_2d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_tile_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_tile_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_tile_5d},
+ {NI, NI, NI,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_im2col_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_im2col_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_im2col_5d}},
+ // RedTy::MAX
+ {{NI, llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_tile_1d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_tile_2d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_tile_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_tile_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_tile_5d},
+ {NI, NI, NI,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_im2col_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_im2col_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_im2col_5d}},
+ // RedTy::INC
+ {{NI, llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_tile_1d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_tile_2d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_tile_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_tile_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_tile_5d},
+ {NI, NI, NI,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_im2col_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_im2col_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_im2col_5d}},
+ // RedTy::DEC
+ {{NI, llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_tile_1d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_tile_2d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_tile_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_tile_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_tile_5d},
+ {NI, NI, NI,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_im2col_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_im2col_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_im2col_5d}},
+ // RedTy::AND
+ {{NI, llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_tile_1d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_tile_2d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_tile_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_tile_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_tile_5d},
+ {NI, NI, NI,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_im2col_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_im2col_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_im2col_5d}},
+ // RedTy::OR
+ {{NI, llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_tile_1d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_tile_2d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_tile_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_tile_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_tile_5d},
+ {NI, NI, NI,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_im2col_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_im2col_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_im2col_5d}},
+ // RedTy::XOR
+ {{NI, llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_tile_1d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_tile_2d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_tile_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_tile_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_tile_5d},
+ {NI, NI, NI,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_im2col_3d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_im2col_4d,
+ llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_im2col_5d}},
+ };
+
+ static_assert(getMaxEnumValForTMAReduxKind() == std::size(IDTable) - 1,
+ "TMAReduxKinds must match number of rows in IDTable");
+
+ size_t redKind = static_cast<size_t>(thisOp.getRedKind());
+ size_t mode = static_cast<size_t>(thisOp.getMode());
+ size_t dim = thisOp.getCoordinates().size();
+ llvm::Intrinsic::ID intrinsicID = IDTable[redKind][mode][dim];
+ if (intrinsicID == llvm::Intrinsic::not_intrinsic)
+ llvm_unreachable("Invalid intrinsic for CpAsyncBulkTensorReduceOp.");
----------------
durga4github wrote:
I thought llvm_unreachable aborts when hit, even in release builds, whereas assert may not (I believe it is silently compiled out when the release build does not have assertions enabled).
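
A minimal standalone sketch of that distinction (SKETCH_UNREACHABLE and pickId are illustrative stand-ins, not the real macro from llvm/Support/ErrorHandling.h; note that under NDEBUG the real llvm_unreachable typically lowers to a __builtin_unreachable() optimizer hint unless LLVM is configured to keep the trap):

#include <cassert>
#include <cstdio>
#include <cstdlib>

// Simplified stand-in for llvm_unreachable.
#ifndef NDEBUG
#define SKETCH_UNREACHABLE(msg)                                                \
  do {                                                                         \
    std::fprintf(stderr, "UNREACHABLE executed: %s\n", msg);                   \
    std::abort(); /* always fires in assertion-enabled builds */               \
  } while (false)
#else
// With NDEBUG, the real macro typically becomes __builtin_unreachable(),
// an optimizer hint rather than a runtime check.
#define SKETCH_UNREACHABLE(msg) __builtin_unreachable()
#endif

int pickId(int dim) {
  switch (dim) {
  case 1:
    return 101;
  case 2:
    return 102;
  default:
    // assert() compiles away entirely under NDEBUG...
    assert(false && "invalid dim");
    // ...while the unreachable macro still terminates (or marks the
    // path unreachable) in every build configuration.
    SKETCH_UNREACHABLE("invalid dim");
  }
}

int main() { return pickId(1) == 101 ? 0 : 1; }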
https://github.com/llvm/llvm-project/pull/157423