#ifndef TRITON_CONVERSION_TRITONGPU_TO_LLVM_UTILITY_H
#define TRITON_CONVERSION_TRITONGPU_TO_LLVM_UTILITY_H

#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "triton/Analysis/Utility.h"
#include "triton/Conversion/MLIRTypes.h"
#include "triton/Conversion/TritonGPUToLLVM/PTXAsmFormat.h"
#include "triton/Conversion/TritonGPUToLLVM/GCNAsmFormat.h"

// Shortcuts for some commonly used LLVM ops to keep code simple and intuitive
// Operators
#define inttoptr(...) rewriter.create<LLVM::IntToPtrOp>(loc, __VA_ARGS__)
#define ptrtoint(...) rewriter.create<LLVM::PtrToIntOp>(loc, __VA_ARGS__)
#define zext(...) rewriter.create<LLVM::ZExtOp>(loc, __VA_ARGS__)
#define sext(...) rewriter.create<LLVM::SExtOp>(loc, __VA_ARGS__)
#define fpext(...) rewriter.create<LLVM::FPExtOp>(loc, __VA_ARGS__)
#define trunc(...) rewriter.create<LLVM::TruncOp>(loc, __VA_ARGS__)
#define udiv(...) rewriter.create<LLVM::UDivOp>(loc, __VA_ARGS__)
#define urem(...) rewriter.create<LLVM::URemOp>(loc, __VA_ARGS__)
#define add(...) rewriter.create<LLVM::AddOp>(loc, __VA_ARGS__)
#define sub(...) rewriter.create<LLVM::SubOp>(loc, __VA_ARGS__)
#define fadd(...) rewriter.create<LLVM::FAddOp>(loc, __VA_ARGS__)
#define mul(...) rewriter.create<LLVM::MulOp>(loc, __VA_ARGS__)
#define fmul(...) rewriter.create<LLVM::FMulOp>(loc, __VA_ARGS__)
#define shl(...) rewriter.create<LLVM::ShlOp>(loc, __VA_ARGS__)
#define lshr(...) rewriter.create<LLVM::LShrOp>(loc, __VA_ARGS__)
#define ashr(...) rewriter.create<LLVM::AShrOp>(loc, __VA_ARGS__)
#define smax(...) rewriter.create<LLVM::SMaxOp>(loc, __VA_ARGS__)
#define umax(...) rewriter.create<LLVM::UMaxOp>(loc, __VA_ARGS__)
#define fmax(...) rewriter.create<LLVM::MaxNumOp>(loc, __VA_ARGS__)
#define smin(...) rewriter.create<LLVM::SMinOp>(loc, __VA_ARGS__)
#define umin(...) rewriter.create<LLVM::UMinOp>(loc, __VA_ARGS__)
#define fmin(...) rewriter.create<LLVM::MinNumOp>(loc, __VA_ARGS__)
#define and_(...) rewriter.create<LLVM::AndOp>(loc, __VA_ARGS__)
#define or_(...) rewriter.create<LLVM::OrOp>(loc, __VA_ARGS__)
#define xor_(...) rewriter.create<LLVM::XOrOp>(loc, __VA_ARGS__)
#define bitcast(val__, type__)                                                 \
  rewriter.create<LLVM::BitcastOp>(loc, type__, val__)
#define addrspacecast(val__, type__)                                           \
  rewriter.create<LLVM::AddrSpaceCastOp>(loc, type__, val__)
#define gep(...) rewriter.create<LLVM::GEPOp>(loc, __VA_ARGS__)
#define ptr_ty(...) LLVM::LLVMPointerType::get(__VA_ARGS__)
#define insert_val(...) rewriter.create<LLVM::InsertValueOp>(loc, __VA_ARGS__)
#define extract_val(...) rewriter.create<LLVM::ExtractValueOp>(loc, __VA_ARGS__)
#define insert_element(...)                                                    \
  rewriter.create<LLVM::InsertElementOp>(loc, __VA_ARGS__)
#define extract_element(...)                                                   \
  rewriter.create<LLVM::ExtractElementOp>(loc, __VA_ARGS__)
#define load(...) rewriter.create<LLVM::LoadOp>(loc, __VA_ARGS__)
#define store(val, ptr) rewriter.create<LLVM::StoreOp>(loc, val, ptr)
#define load_dsmem(...) LLVM::createLoadDSmem(loc, rewriter, __VA_ARGS__)
#define store_dsmem(...) LLVM::createStoreDSmem(loc, rewriter, __VA_ARGS__)
#define fcmp_ogt(lhs, rhs)                                                     \
  rewriter.create<LLVM::FCmpOp>(loc, rewriter.getI1Type(),                     \
                                LLVM::FCmpPredicate::ogt, lhs, rhs)
#define fcmp_olt(lhs, rhs)                                                     \
  rewriter.create<LLVM::FCmpOp>(loc, rewriter.getI1Type(),                     \
                                LLVM::FCmpPredicate::olt, lhs, rhs)
#define fcmp_eq(lhs, rhs)                                                      \
  rewriter.create<LLVM::FCmpOp>(loc, rewriter.getI1Type(),                     \
                                LLVM::FCmpPredicate::oeq, lhs, rhs)
#define icmp_eq(...)                                                           \
  rewriter.create<LLVM::ICmpOp>(loc, LLVM::ICmpPredicate::eq, __VA_ARGS__)
#define icmp_ne(...)                                                           \
  rewriter.create<LLVM::ICmpOp>(loc, LLVM::ICmpPredicate::ne, __VA_ARGS__)
#define icmp_slt(...)                                                          \
  rewriter.create<LLVM::ICmpOp>(loc, LLVM::ICmpPredicate::slt, __VA_ARGS__)
#define icmp_sle(...)                                                          \
  rewriter.create<LLVM::ICmpOp>(loc, LLVM::ICmpPredicate::sle, __VA_ARGS__)
#define icmp_sgt(...)                                                          \
  rewriter.create<LLVM::ICmpOp>(loc, LLVM::ICmpPredicate::sgt, __VA_ARGS__)
#define icmp_sge(...)                                                          \
  rewriter.create<LLVM::ICmpOp>(loc, LLVM::ICmpPredicate::sge, __VA_ARGS__)
#define icmp_ult(...)                                                          \
  rewriter.create<LLVM::ICmpOp>(loc, LLVM::ICmpPredicate::ult, __VA_ARGS__)
#define icmp_ule(...)                                                          \
  rewriter.create<LLVM::ICmpOp>(loc, LLVM::ICmpPredicate::ule, __VA_ARGS__)
#define icmp_ugt(...)                                                          \
  rewriter.create<LLVM::ICmpOp>(loc, LLVM::ICmpPredicate::ugt, __VA_ARGS__)
#define icmp_uge(...)                                                          \
  rewriter.create<LLVM::ICmpOp>(loc, LLVM::ICmpPredicate::uge, __VA_ARGS__)
#define select(...) rewriter.create<LLVM::SelectOp>(loc, __VA_ARGS__)
#define address_of(...) rewriter.create<LLVM::AddressOfOp>(loc, __VA_ARGS__)
#define barrier() rewriter.create<mlir::gpu::BarrierOp>(loc)
#define barSync(rewriter, op, bar, numThreads)                                 \
  do {                                                                         \
    ::mlir::triton::PTXBuilder ptxBuilder;                                     \
    auto &barSyncOp = *ptxBuilder.create<>("bar.sync");                        \
    barSyncOp(ptxBuilder.newConstantOperand(bar),                              \
              ptxBuilder.newConstantOperand(numThreads));                      \
    auto voidTy = void_ty(op->getContext());                                   \
    ptxBuilder.launch(rewriter, op->getLoc(), voidTy);                         \
  } while (0)
#define undef(...) rewriter.create<LLVM::UndefOp>(loc, __VA_ARGS__)
#define null(...) rewriter.create<LLVM::ZeroOp>(loc, __VA_ARGS__)
#define call(...) rewriter.create<LLVM::CallOp>(loc, __VA_ARGS__)
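
// Illustrative sketch, not part of this header: the shortcuts above assume a
// `rewriter` and a `loc` are in scope (as inside a ConversionPattern), so they
// compose as ordinary expressions. The values `threadId`, `lane`, and
// `isFirstLane` below are hypothetical.
//
//   Value lane = urem(threadId, i32_val(32));       // LLVM::URemOp
//   Value isFirstLane = icmp_eq(lane, i32_val(0));  // LLVM::ICmpOp eq
//   Value doubled = shl(lane, i32_val(1));          // LLVM::ShlOp
//   Value clamped = smin(doubled, i32_val(31));     // LLVM::SMinOp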

// Types
#define int_ty(width) rewriter.getIntegerType(width)
#define i64_ty rewriter.getIntegerType(64)
#define i32_ty rewriter.getIntegerType(32)
#define i16_ty rewriter.getIntegerType(16)
#define ui32_ty rewriter.getIntegerType(32, false)
#define f16_ty rewriter.getF16Type()
#define bf16_ty rewriter.getBF16Type()
#define i8_ty rewriter.getIntegerType(8)
#define i1_ty rewriter.getI1Type()
#define f32_ty rewriter.getF32Type()
#define f64_ty rewriter.getF64Type()
#define vec_ty(type, num) VectorType::get(num, type)
#define void_ty(ctx) LLVM::LLVMVoidType::get(ctx)
#define struct_ty(...) LLVM::LLVMStructType::getLiteral(ctx, __VA_ARGS__)
#define array_ty(elemTy, count) LLVM::LLVMArrayType::get(elemTy, count)

// Constants
#define f32_val(...) LLVM::createConstantF32(loc, rewriter, __VA_ARGS__)
#define f64_val(...) LLVM::createConstantF64(loc, rewriter, __VA_ARGS__)
#define i32_val(...) LLVM::createConstantI32(loc, rewriter, __VA_ARGS__)
#define int_val(width, val)                                                    \
  LLVM::createLLVMIntegerConstant(rewriter, loc, width, val)
#define tid_val() getThreadId(rewriter, loc)

// Attributes
#define i32_arr_attr(...) rewriter.getI32ArrayAttr({__VA_ARGS__})
#define i64_arr_attr(...) rewriter.getI64ArrayAttr({__VA_ARGS__})
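
// Illustrative sketch, not part of this header: the type and constant
// shortcuts expand against the same `rewriter`/`loc`, so a pattern can build
// a vector value in a few lines. The names below are hypothetical.
//
//   Type v4f32 = vec_ty(f32_ty, 4);                  // vector<4xf32>
//   Value zero = f32_val(0.0f);                      // 0.0 : f32
//   Value c16 = int_val(8, 16);                      // 16 : i8
//   Value vec = undef(v4f32);
//   vec = insert_element(v4f32, vec, zero, i32_val(0));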

namespace mlir {
namespace triton {

// Delinearize supposing order is [0, 1, .. , n]
template <typename T>
llvm::SmallVector<T> getMultiDimIndexImpl(T linearIndex,
                                          llvm::ArrayRef<T> shape) {
  // shape: {a, b, c, d} -> accMul: {1, a, a*b, a*b*c}
  size_t rank = shape.size();
  T accMul = product(shape.drop_back());
  T linearRemain = linearIndex;
  llvm::SmallVector<T> multiDimIndex(rank);
  for (int i = rank - 1; i >= 0; --i) {
    multiDimIndex[i] = linearRemain / accMul;
    linearRemain = linearRemain % accMul;
    if (i != 0) {
      accMul = accMul / shape[i - 1];
    }
  }
  return multiDimIndex;
}

template <typename T>
llvm::SmallVector<T> getMultiDimIndex(T linearIndex, llvm::ArrayRef<T> shape,
                                      llvm::ArrayRef<unsigned> order) {
  size_t rank = shape.size();
  assert(rank == order.size());
  auto reordered = reorder(shape, order);
  auto reorderedMultiDim = getMultiDimIndexImpl<T>(linearIndex, reordered);
  llvm::SmallVector<T> multiDim(rank);
  for (unsigned i = 0; i < rank; ++i) {
    multiDim[order[i]] = reorderedMultiDim[i];
  }
  return multiDim;
}

// Linearize supposing order is [0, 1, .. , n]
template <typename T>
T getLinearIndexImpl(llvm::ArrayRef<T> multiDimIndex, llvm::ArrayRef<T> shape) {
  assert(multiDimIndex.size() == shape.size());
  // shape: {a, b, c, d} -> accMul: {1, a, a*b, a*b*c}
  size_t rank = shape.size();
  T accMul = product(shape.drop_back());
  T linearIndex = 0;
  for (int i = rank - 1; i >= 0; --i) {
    linearIndex += multiDimIndex[i] * accMul;
    if (i != 0) {
      accMul = accMul / shape[i - 1];
    }
  }
  return linearIndex;
}

template <typename T>
T getLinearIndex(llvm::ArrayRef<T> multiDimIndex, llvm::ArrayRef<T> shape,
                 llvm::ArrayRef<unsigned> order) {
  assert(shape.size() == order.size());
  return getLinearIndexImpl<T>(reorder(multiDimIndex, order),
                               reorder(shape, order));
}
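
// Worked example (comment only): with shape = {2, 3} and the identity order
// {0, 1}, dimension 0 varies fastest, so
//   getMultiDimIndex<int>(5, {2, 3}, {0, 1}) == {1, 2}   // 5 = 1 + 2 * 2
// and getLinearIndex<int>({1, 2}, {2, 3}, {0, 1}) == 5 recovers the input.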

} // namespace triton

namespace LLVM {
using namespace mlir::triton;

Value createConstantI32(Location loc, OpBuilder &rewriter, int32_t v);

/// Create a 32-bit float constant.
Value createConstantF32(Location loc, OpBuilder &rewriter, float v);

/// Create a 64-bit float constant.
Value createConstantF64(Location loc, OpBuilder &rewriter, float v);

/// Create an index type constant.
Value createIndexConstant(OpBuilder &builder, Location loc,
                          TypeConverter *converter, int64_t value);

/// Create an integer constant of \param width bits.
Value createLLVMIntegerConstant(OpBuilder &builder, Location loc, short width,
                                int64_t value);

/// Usage of macro load_dsmem
/// (1) load_dsmem(addr, ctaId)
/// (2) load_dsmem(addr, ctaId, vec)
Value createLoadDSmem(Location loc, PatternRewriter &rewriter, Value addr,
                      Value ctaId);
SmallVector<Value> createLoadDSmem(Location loc, PatternRewriter &rewriter,
                                   Value addr, Value ctaId, unsigned vec);

/// Usage of macro store_dsmem
/// (1) store_dsmem(addr, ctaId, value, pred)
/// (2) store_dsmem(addr, ctaId, value)
/// (3) store_dsmem(addr, ctaId, values, pred)
/// (4) store_dsmem(addr, ctaId, values)
void createStoreDSmem(Location loc, PatternRewriter &rewriter, Value addr,
                      Value ctaId, Value value, Value pred);
void createStoreDSmem(Location loc, PatternRewriter &rewriter, Value addr,
                      Value ctaId, Value value);
void createStoreDSmem(Location loc, PatternRewriter &rewriter, Value addr,
                      Value ctaId, ArrayRef<Value> values, Value pred);
void createStoreDSmem(Location loc, PatternRewriter &rewriter, Value addr,
                      Value ctaId, ArrayRef<Value> values);
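
// Illustrative sketch of the corresponding macros, not part of this header;
// `addr`, `ctaId`, `vals`, and `pred` are hypothetical values in scope:
//
//   Value scalar = load_dsmem(addr, ctaId);               // overload (1)
//   SmallVector<Value> four = load_dsmem(addr, ctaId, 4);  // overload (2)
//   store_dsmem(addr, ctaId, vals, pred);                  // overload (3)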

/// Helper function to get strides from a given shape and its order
SmallVector<Value>
getStridesFromShapeAndOrder(ArrayRef<int64_t> shape, ArrayRef<unsigned> order,
                            Location loc, ConversionPatternRewriter &rewriter);

struct SharedMemoryObject {
  Value base; // i32 ptr. The start address of the shared memory object after
              // the initial allocation or the last slicing operation.
  // We need to store strides as Values, not integers, because the
  // extract_slice instruction can take a slice at arbitrary offsets.
  // Take $a[16:32, 16:32] as an example; though we know the stride of $a[0] is
  // 32, we need to let the instruction that uses $a be aware of that.
  // Otherwise, when we use $a, we only know that the shape of $a is 16x16. If
  // we store strides into an attribute array of integers, the information
  // cannot pass through block argument assignment because attributes are
  // associated with operations, not Values.
  // TODO(Keren): We may need to figure out a way to store strides as integers
  // if we want to support more optimizations.
  SmallVector<Value>
      strides; // i32 int. The strides of the shared memory object.
  SmallVector<Value> offsets; // i32 int.
  // Offsets are applied at the last slicing operation.
  // We can use offsets to recover the previous base.
  // The offsets are zero at the initial allocation.

  SharedMemoryObject(Value base, ArrayRef<Value> strides,
                     ArrayRef<Value> offsets)
      : base(base), strides(strides.begin(), strides.end()),
        offsets(offsets.begin(), offsets.end()) {}

  SharedMemoryObject(Value base, ArrayRef<int64_t> shape,
                     ArrayRef<unsigned> order, Location loc,
                     ConversionPatternRewriter &rewriter)
      : base(base) {
    strides = getStridesFromShapeAndOrder(shape, order, loc, rewriter);
    offsets.append(order.size(), i32_val(0));
  }

  SmallVector<Value> getElems() const {
    SmallVector<Value> elems;
    elems.push_back(base);
    elems.append(strides.begin(), strides.end());
    elems.append(offsets.begin(), offsets.end());
    return elems;
  }

  SmallVector<Type> getTypes() const {
    SmallVector<Type> types;
    types.push_back(base.getType());
    types.append(strides.size(), IntegerType::get(base.getContext(), 32));
    types.append(offsets.size(), IntegerType::get(base.getContext(), 32));
    return types;
  }

  Value getCSwizzleOffset(int order) const {
    assert(order >= 0 && order < strides.size());
    return offsets[order];
  }

  Value getBaseBeforeSlice(int order, Location loc,
                           ConversionPatternRewriter &rewriter) const {
    Value cSwizzleOffset = getCSwizzleOffset(order);
    Value offset = sub(i32_val(0), cSwizzleOffset);
    Type type = base.getType();
    return gep(type, base, offset);
  }
};
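
// Illustrative note, a sketch rather than declared API: the object is
// typically flattened with getElems()/getTypes() when it has to cross a block
// or function boundary, then rebuilt on the other side with
// getSharedMemoryObjectFromStruct(...) declared below.
//
//   auto elems = smemObj.getElems();
//   auto structTy = LLVM::LLVMStructType::getLiteral(ctx, smemObj.getTypes());
//   // ...pack `elems` into a value of `structTy`, pass it along, then:
//   auto rebuilt = getSharedMemoryObjectFromStruct(loc, llvmStruct, rewriter);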

SharedMemoryObject
getSharedMemoryObjectFromStruct(Location loc, Value llvmStruct,
                                ConversionPatternRewriter &rewriter);

// Convert an \param index to a multi-dim coordinate given \param shape and
// \param order.
SmallVector<Value> delinearize(ConversionPatternRewriter &rewriter,
                               Location loc, Value linear,
                               ArrayRef<unsigned> shape,
                               ArrayRef<unsigned> order);

SmallVector<Value> delinearize(ConversionPatternRewriter &rewriter,
                               Location loc, unsigned linear,
                               ArrayRef<unsigned> shape);

SmallVector<Value> delinearize(ConversionPatternRewriter &rewriter,
                               Location loc, Value linear,
                               ArrayRef<unsigned> shape);

Value linearize(ConversionPatternRewriter &rewriter, Location loc,
                ArrayRef<Value> multiDim, ArrayRef<unsigned> shape,
                ArrayRef<unsigned> order);

Value linearize(ConversionPatternRewriter &rewriter, Location loc,
                ArrayRef<Value> multiDim, ArrayRef<unsigned> shape);

Value storeShared(ConversionPatternRewriter &rewriter, Location loc, Value ptr,
                  Value val, Value pred);

Value loadShared(ConversionPatternRewriter &rewriter, Location loc, Value ptr,
                 Value pred);

Value shflSync(Location loc, ConversionPatternRewriter &rewriter, Value val,
               int i);
Value shflUpSync(Location loc, ConversionPatternRewriter &rewriter, Value val,
                 int i, Value laneId);
Value shflIdxSync(Location loc, ConversionPatternRewriter &rewriter, Value val,
                  int i);
Value shflIdxSync(Location loc, ConversionPatternRewriter &rewriter, Value val,
                  Value i);
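
// Illustrative sketch, not part of this header: shflSync combines with the
// arithmetic shortcuts above into a butterfly reduction. `acc` is a
// hypothetical f32 value; 32 lanes are assumed (wavefront width differs on
// AMD targets).
//
//   for (int offset = 16; offset > 0; offset /= 2)
//     acc = fadd(acc, shflSync(loc, rewriter, acc, offset));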

Value getSRegValue(OpBuilder &b, Location loc, const std::string &sRegStr);
Value addStringToModule(Location loc, ConversionPatternRewriter &rewriter,
                        StringRef key, StringRef content);

} // namespace LLVM

bool isF8(Type eType);

} // namespace mlir

#endif