Merge pull request #1283 from j2kun:convert-layout-op
PiperOrigin-RevId: 718020729
copybara-github committed Jan 21, 2025
2 parents c6dea12 + 9d2a65b commit 6e18acf
Showing 4 changed files with 66 additions and 0 deletions.
23 changes: 23 additions & 0 deletions lib/Dialect/TensorExt/IR/TensorExtOps.cpp
@@ -3,6 +3,7 @@
#include "llvm/include/llvm/ADT/STLExtras.h" // from @llvm-project
#include "mlir/include/mlir/Dialect/Arith/IR/Arith.h" // from @llvm-project
#include "mlir/include/mlir/Dialect/Tensor/IR/Tensor.h" // from @llvm-project
#include "mlir/include/mlir/IR/AffineMap.h" // from @llvm-project
#include "mlir/include/mlir/IR/MLIRContext.h" // from @llvm-project
#include "mlir/include/mlir/IR/Matchers.h" // from @llvm-project
#include "mlir/include/mlir/IR/PatternMatch.h" // from @llvm-project
@@ -36,6 +37,28 @@ LogicalResult RotateOp::verify() {
  return success();
}

LogicalResult ConvertLayoutOp::verify() {
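  // The layout maps assign packed slots to tensor indices, so each map must
  // consume exactly one affine dimension per tensor dimension.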
  int64_t rank = cast<RankedTensorType>(getTensor().getType()).getRank();
  const AffineMap &fromLayout = getFromLayout().getValue();
  const AffineMap &toLayout = getToLayout().getValue();

  if (rank != fromLayout.getNumDims() || rank != toLayout.getNumDims()) {
    std::string fromLayoutStr, toLayoutStr;
    llvm::raw_string_ostream fromLayoutStream(fromLayoutStr),
        toLayoutStream(toLayoutStr);
    fromLayout.print(fromLayoutStream);
    toLayout.print(toLayoutStream);

    return emitOpError()
           << "requires tensor rank to match the layout map's dimension count, "
              "but found rank "
           << rank << " and maps " << fromLayoutStream.str() << " and "
           << toLayoutStream.str();
  }

  return success();
}

} // namespace tensor_ext
} // namespace heir
} // namespace mlir
16 changes: 16 additions & 0 deletions lib/Dialect/TensorExt/IR/TensorExtOps.td
@@ -12,6 +12,7 @@ include "mlir/Interfaces/SideEffectInterfaces.td"
class TensorExt_Op<string mnemonic, list<Trait> traits = []> :
    Op<TensorExt_Dialect, mnemonic, traits> {
  let cppNamespace = "::mlir::heir::tensor_ext";
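  // Default assembly format shared by the dialect's ops; individual ops, like
  // convert_layout below, may override it with a custom format.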
  let assemblyFormat = "operands attr-dict `:` functional-type(operands, results)";
}

def TensorExt_RotateOp : TensorExt_Op<"rotate", [Pure, AllTypesMatch<["tensor", "output"]>]> {
@@ -48,4 +49,19 @@ def TensorExt_RotateOp : TensorExt_Op<"rotate", [Pure, AllTypesMatch<["tensor",
  let hasVerifier = 1;
}

def TensorExt_ConvertLayoutOp : TensorExt_Op<"convert_layout", [Pure, AllTypesMatch<["tensor", "output"]>]> {
  let summary = "Convert from one layout to another.";
  let description = [{
    This op represents the conversion of a tensor from one packed layout to
    another. This is implemented via a "shift network" of ciphertext rotations,
    plaintext masks (ciphertext-plaintext multiplications), and additions.

    This op is inserted by layout selection passes.
  }];
  let assemblyFormat = "operands attr-dict `:` type($output)";
  let arguments = (ins AnyRankedTensor:$tensor, Builtin_AffineMapAttr:$from_layout, Builtin_AffineMapAttr:$to_layout);
  let results = (outs AnyRankedTensor:$output);
  let hasVerifier = 1;
}

#endif // LIB_DIALECT_TENSOREXT_IR_TENSOREXTOPS_TD_
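
To make the "shift network" mentioned in the op description concrete, here is a minimal sketch of the kind of IR a lowering of convert_layout could produce, assuming a single packed 16-slot vector whose adjacent slot pairs are swapped. The function name, mask constants, rotation amounts, and the rotation direction they assume are illustrative inventions for this example, not the output of an existing HEIR pass; only tensor_ext.rotate and the standard arith/func ops come from the files in this commit.

// Hypothetical shift-network sketch: swap each adjacent pair of slots.
// Mask values and rotation amounts are illustrative only.
func.func @convert_layout_sketch(%arg0: tensor<16xi32>) -> tensor<16xi32> {
  %c1 = arith.constant 1 : i32
  %c15 = arith.constant 15 : i32
  %even_mask = arith.constant dense<[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]> : tensor<16xi32>
  %odd_mask = arith.constant dense<[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]> : tensor<16xi32>

  // Plaintext masks (ciphertext-plaintext multiplications) select the slots
  // that move by each shift amount.
  %even_part = arith.muli %arg0, %even_mask : tensor<16xi32>
  %odd_part = arith.muli %arg0, %odd_mask : tensor<16xi32>

  // Ciphertext rotations move each masked group to its target slots.
  %even_rot = tensor_ext.rotate %even_part, %c15 : tensor<16xi32>, i32
  %odd_rot = tensor_ext.rotate %odd_part, %c1 : tensor<16xi32>, i32

  // A final addition recombines the shifted pieces into the new layout.
  %out = arith.addi %even_rot, %odd_rot : tensor<16xi32>
  return %out : tensor<16xi32>
}

Because the two masks select disjoint slots and the rotations send them to disjoint targets, the addition reassembles a complete tensor in the new layout; real shift networks produced by layout selection may need more stages and different masks.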
20 changes: 20 additions & 0 deletions tests/Dialect/TensorExt/IR/convert_layout_verifier.mlir
@@ -0,0 +1,20 @@
// RUN: heir-opt --verify-diagnostics --split-input-file %s


#row_major = affine_map<(d0, d1) -> (16*d0 + d1)>
#col_major = affine_map<(d0) -> (d0)>
func.func @test_convert_layout(%0: tensor<16x16xi32>) -> tensor<16x16xi32> {
  // expected-error@+1 {{requires tensor rank to match the layout map's dimension count}}
  %1 = tensor_ext.convert_layout %0 {from_layout = #row_major, to_layout = #col_major} : tensor<16x16xi32>
  return %1 : tensor<16x16xi32>
}

// -----

#row_major = affine_map<(d0, d1) -> (16*d0 + d1)>
#col_major = affine_map<(d0) -> (d0)>
func.func @test_convert_layout(%0: tensor<16x16xi32>) -> tensor<16x16xi32> {
  // expected-error@+1 {{requires tensor rank to match the layout map's dimension count}}
  %1 = tensor_ext.convert_layout %0 {from_layout = #col_major, to_layout = #row_major} : tensor<16x16xi32>
  return %1 : tensor<16x16xi32>
}
7 changes: 7 additions & 0 deletions tests/Dialect/TensorExt/IR/ops.mlir
@@ -7,3 +7,10 @@ func.func @test_rotate(%0: tensor<16xi32>) -> tensor<16xi32> {
  %1 = tensor_ext.rotate %0, %c1 : tensor<16xi32>, i32
  return %1 : tensor<16xi32>
}

#row_major = affine_map<(d0, d1) -> (16*d0 + d1)>
#col_major = affine_map<(d0, d1) -> (16*d1 + d0)>
func.func @test_convert_layout(%0: tensor<16x16xi32>) -> tensor<16x16xi32> {
  %1 = tensor_ext.convert_layout %0 {from_layout = #row_major, to_layout = #col_major} : tensor<16x16xi32>
  return %1 : tensor<16x16xi32>
}
