
Commit 3497c24

Integrate LLVM at eda3e96b401a9b86132e39432e41e2000d1ab382 (#4208)
Update LLVM to llvm/llvm-project@eda3e96. The `ToMemrefOp` is replaced with `ToBufferOp`, per the rename made in llvm/llvm-project@8f91b10, and `getBackwardSlice` now returns a `LogicalResult`, per llvm/llvm-project@6a8dde0. This commit also adds folders for the `tril_indices` and `triu_indices` ops for the case when the `row` and `col` values are both 0.

Signed-off-by: Vivek Khandelwal <[email protected]>
1 parent 1cb25e9 commit 3497c24
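
For context on the new folders: `triu_indices`/`tril_indices` produce a 2xN tensor of (row, col) coordinate pairs, and with `row == 0` and `col == 0` there are no positions to enumerate, so N is 0. A minimal sketch of the PyTorch-level behavior the folders mirror, assuming a libtorch build (this program is illustrative and not part of the commit):

#include <torch/torch.h>
#include <iostream>

int main() {
  // For a 0x0 matrix there are no triangle positions, so both calls
  // return an empty 2x0 index tensor; the new folders constant-fold
  // aten.triu_indices / aten.tril_indices to exactly this value.
  torch::Tensor triu = torch::triu_indices(/*row=*/0, /*col=*/0);
  torch::Tensor tril = torch::tril_indices(/*row=*/0, /*col=*/0);
  std::cout << triu.sizes() << "\n"; // [2, 0]
  std::cout << tril.sizes() << "\n"; // [2, 0]
  return 0;
}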

File tree

10 files changed (+54, -15 lines)

externals/llvm-project

Submodule llvm-project updated 7297 files

include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td

Lines changed: 2 additions & 0 deletions

@@ -17354,6 +17354,7 @@ def Torch_AtenTriuIndicesOp : Torch_Op<"aten.triu_indices", [
       printDefaultTorchOp(printer, *this, 7, 1);
     }
   }];
+  let hasFolder = 1;
   let hasVerifier = 1;
 }

@@ -17384,6 +17385,7 @@ def Torch_AtenTrilIndicesOp : Torch_Op<"aten.tril_indices", [
       printDefaultTorchOp(printer, *this, 7, 1);
     }
   }];
+  let hasFolder = 1;
   let hasVerifier = 1;
 }
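
For context, `let hasFolder = 1;` makes ODS declare an `OpFoldResult fold(FoldAdaptor)` hook on the generated op class; the definitions land in TorchOps.cpp below. Folders run during canonicalization and can also be applied eagerly at build time. A hedged sketch of such a caller, with `buildTriuIndices` a hypothetical helper (the operand list follows the op's ODS signature above):

#include "mlir/IR/Builders.h"
#include "torch-mlir/Dialect/Torch/IR/TorchOps.h"

using namespace mlir;
using namespace mlir::torch::Torch;

// createOrFold invokes the op's fold hook immediately after building it;
// if the hook returns a non-null attribute (the row == col == 0 case),
// the result is materialized as a constant instead of the op.
Value buildTriuIndices(OpBuilder &b, Location loc, Type resultTy, Value row,
                       Value col, Value offset, Value dtype, Value layout,
                       Value device, Value pinMemory) {
  return b.createOrFold<AtenTriuIndicesOp>(loc, resultTy, row, col, offset,
                                           dtype, layout, device, pinMemory);
}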

lib/Dialect/TMTensor/Transforms/Bufferize.cpp

Lines changed: 3 additions & 2 deletions

@@ -6,9 +6,9 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
 #include "mlir/Dialect/Arith/IR/Arith.h"
 #include "mlir/Dialect/Arith/Utils/Utils.h"
+#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
 #include "mlir/Dialect/Bufferization/IR/Bufferization.h"
 #include "mlir/Dialect/Func/IR/FuncOps.h"
 #include "mlir/Dialect/Func/Transforms/Passes.h"

@@ -20,6 +20,7 @@
 #include "mlir/IR/BuiltinDialect.h"
 #include "mlir/IR/Operation.h"
 #include "mlir/Pass/Pass.h"
+#include "mlir/Transforms/DialectConversion.h"
 #include "torch-mlir-dialects/Dialect/TMTensor/IR/TMTensorDialect.h"
 #include "torch-mlir-dialects/Dialect/TMTensor/IR/TMTensorOps.h"
 #include "torch-mlir-dialects/Dialect/TMTensor/Transforms/PassDetail.h"

@@ -177,7 +178,7 @@ struct TMTensorBufferizePass
       }
       if (isa<TensorType>(inputs[0].getType())) {
         // Tensor to MemRef cast.
-        return builder.create<bufferization::ToMemrefOp>(loc, type, inputs[0]);
+        return builder.create<bufferization::ToBufferOp>(loc, type, inputs[0]);
       }
       llvm_unreachable("only tensor/memref input types supported");
     });

lib/Dialect/Torch/IR/TorchOps.cpp

Lines changed: 32 additions & 0 deletions

@@ -5963,6 +5963,22 @@ LogicalResult AtenTriuIndicesOp::verify() {
   return success();
 }
 
+OpFoldResult AtenTriuIndicesOp::fold(FoldAdaptor adaptor) {
+  int64_t row, col;
+  if (matchPattern(getRow(), m_TorchConstantInt(&row)) &&
+      matchPattern(getCol(), m_TorchConstantInt(&col)) && row == 0 &&
+      col == 0) {
+    // Get the result type (should be a tensor type)
+    auto resultTy = dyn_cast<ValueTensorType>(getType());
+    if (!resultTy || !resultTy.hasSizes() || !resultTy.hasDtype())
+      return nullptr;
+    auto shapedTy = resultTy.toBuiltinTensor();
+    // Return an empty tensor (0 elements)
+    return DenseElementsAttr::get(shapedTy, ArrayRef<Attribute>{});
+  }
+  return nullptr;
+}
+
 // AtenTrilIndicesOp
 //===----------------------------------------------------------------------===//
 
@@ -6000,6 +6016,22 @@ LogicalResult AtenTrilIndicesOp::verify() {
   return success();
 }
 
+OpFoldResult AtenTrilIndicesOp::fold(FoldAdaptor adaptor) {
+  int64_t row, col;
+  if (matchPattern(getRow(), m_TorchConstantInt(&row)) &&
+      matchPattern(getCol(), m_TorchConstantInt(&col)) && row == 0 &&
+      col == 0) {
+    // Get the result type (should be a tensor type)
+    auto resultTy = dyn_cast<ValueTensorType>(getType());
+    if (!resultTy || !resultTy.hasSizes() || !resultTy.hasDtype())
+      return nullptr;
+    auto shapedTy = resultTy.toBuiltinTensor();
+    // Return an empty tensor (0 elements)
+    return DenseElementsAttr::get(shapedTy, ArrayRef<Attribute>{});
+  }
+  return nullptr;
+}
+
 // AtenRot90Op
 //===----------------------------------------------------------------------===//
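
Note that both folders return a `DenseElementsAttr` built from an empty value list, which is valid only because the result type has zero elements. A standalone sketch of that construction, assuming the default int64 index dtype (`makeEmptyIndexTensor` is a hypothetical helper, not part of this commit):

#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"

using namespace mlir;

// triu_indices/tril_indices yield a 2xN tensor of indices; with
// row == 0 and col == 0 there is nothing to index, so N == 0 and the
// attribute needs no element values.
DenseElementsAttr makeEmptyIndexTensor(MLIRContext *ctx) {
  auto ty = RankedTensorType::get({2, 0}, IntegerType::get(ctx, 64));
  return DenseElementsAttr::get(ty, ArrayRef<Attribute>{});
}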

lib/Dialect/Torch/Transforms/InlineGlobalSlots.cpp

Lines changed: 2 additions & 1 deletion

@@ -251,7 +251,8 @@ bool InlineGlobalSlotsAnalysis::isValueSafeTransferFunction(Value value) {
 
 SmallVector<Operation *> getBackwardSliceIncludingRoot(Value initialValue) {
   SetVector<Operation *> sliceSet;
-  getBackwardSlice(initialValue, &sliceSet);
+  LogicalResult result = getBackwardSlice(initialValue, &sliceSet);
+  assert(result.succeeded() && "expected a backward slice");
   SmallVector<Operation *> slice;
   llvm::append_range(slice, sliceSet);
   slice.push_back(initialValue.getDefiningOp());
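
Since `getBackwardSlice` can now report failure instead of silently producing an incomplete slice, this call site asserts success. A hedged sketch of the alternative, propagating the failure to the caller (`safeBackwardSlice` is a hypothetical helper, not part of this commit):

#include "mlir/Analysis/SliceAnalysis.h"

using namespace mlir;

// Same call as above, but surfaces the failure as FailureOr rather
// than asserting, so callers can recover.
FailureOr<SetVector<Operation *>> safeBackwardSlice(Value initialValue) {
  SetVector<Operation *> sliceSet;
  if (failed(getBackwardSlice(initialValue, &sliceSet)))
    return failure();
  return sliceSet;
}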

lib/RefBackend/RefBackend.cpp

Lines changed: 1 addition & 1 deletion

@@ -279,7 +279,7 @@ bufferizeMLProgramGlobaStoreOp(ml_program::GlobalStoreOp globalStoreOp,
   Value memref = b.create<memref::GetGlobalOp>(
       globalStoreOp.getLoc(), memrefType,
       globalStoreOp.getGlobalAttr().getLeafReference());
-  Value copyValue = b.create<bufferization::ToMemrefOp>(
+  Value copyValue = b.create<bufferization::ToBufferOp>(
       globalStoreOp->getLoc(), memrefType, globalStoreOp.getValue());
   b.create<memref::CopyOp>(globalStoreOp->getLoc(), copyValue, memref);
   return success();

projects/pt1/e2e_testing/xfail_sets.py

Lines changed: 1 addition & 0 deletions

@@ -534,6 +534,7 @@
     "AvgPool2dSingleIntTupleParamsIncludePadModule_basic",
     "AvgPool2dSingleIntTupleParamsModule_basic",
     "SliceOutOfLowerBoundEndIndexModule_basic",
+    "RollModule_basic",
 }
 
 FX_IMPORTER_STABLEHLO_XFAIL_SET = {

projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py

Lines changed: 2 additions & 0 deletions

@@ -1199,11 +1199,13 @@ def emit_with_mutating_variants(key, **kwargs):
     emit(
         "aten::triu_indices : (int, int, int, int?, int?, Device?, bool?) -> (Tensor)",
         has_verifier=True,
+        has_folder=True,
     )
 
     emit(
         "aten::tril_indices : (int, int, int, int?, int?, Device?, bool?) -> (Tensor)",
         has_verifier=True,
+        has_folder=True,
     )
 
     emit("aten::deg2rad : (Tensor) -> (Tensor)")

test/Dialect/TMTensor/bufferize.mlir

Lines changed: 9 additions & 9 deletions

@@ -4,7 +4,7 @@
 // CHECK-LABEL: func.func @scan_1d_inclusive(
 // CHECK-SAME: %[[IN_TENSOR:.*]]: tensor<128xi32>, %[[OUT_TENSOR:.*]]: tensor<128xi32>,
 // CHECK-SAME: %[[ACC_TENSOR:.*]]: tensor<i32>) -> (tensor<128xi32>, tensor<i32>) {
-// CHECK-DAG: %[[IN_MEMREF:.*]] = bufferization.to_memref %[[IN_TENSOR]] : tensor<128xi32> to memref<128xi32>
+// CHECK-DAG: %[[IN_MEMREF:.*]] = bufferization.to_buffer %[[IN_TENSOR]] : tensor<128xi32> to memref<128xi32>
 // CHECK-DAG: %[[OUT_MEMREF_NEW:.*]] = memref.alloc() : memref<128xi32>
 // CHECK-DAG: %[[ACC_MEMREF_NEW:.*]] = memref.alloc() : memref<i32>
 // CHECK-DAG: %[[OUT_TENSOR_NEW:.*]] = bufferization.to_tensor %[[OUT_MEMREF_NEW]] : memref<128xi32> to tensor<128xi32>

@@ -30,8 +30,8 @@ func.func @scan_1d_inclusive(%in: tensor<128xi32>, %out: tensor<128xi32>, %acc:
 // CHECK-LABEL: func.func @scan_1d_exclusive(
 // CHECK-SAME: %[[IN_TENSOR:.*]]: tensor<128xi32>, %[[OUT_TENSOR:.*]]: tensor<128xi32>,
 // CHECK-SAME: %[[ACC_TENSOR:.*]]: tensor<i32>) -> (tensor<128xi32>, tensor<i32>) {
-// CHECK-DAG: %[[IN_MEMREF:.*]] = bufferization.to_memref %[[IN_TENSOR]] : tensor<128xi32> to memref<128xi32>
-// CHECK-DAG: %[[ACC_MEMREF:.*]] = bufferization.to_memref %[[ACC_TENSOR]] : tensor<i32> to memref<i32>
+// CHECK-DAG: %[[IN_MEMREF:.*]] = bufferization.to_buffer %[[IN_TENSOR]] : tensor<128xi32> to memref<128xi32>
+// CHECK-DAG: %[[ACC_MEMREF:.*]] = bufferization.to_buffer %[[ACC_TENSOR]] : tensor<i32> to memref<i32>
 // CHECK-DAG: %[[OUT_MEMREF_NEW:.*]] = memref.alloc() : memref<128xi32>
 // CHECK-DAG: %[[ACC_MEMREF_NEW:.*]] = memref.alloc() : memref<i32>
 // CHECK-DAG: %[[OUT_TENSOR_NEW:.*]] = bufferization.to_tensor %[[OUT_MEMREF_NEW]] : memref<128xi32> to tensor<128xi32>

@@ -59,9 +59,9 @@ func.func @scan_1d_exclusive(%in: tensor<128xi32>, %out: tensor<128xi32>, %acc:
 // CHECK-SAME: %[[ORIG_TENSOR:.*]]: tensor<8xi32>,
 // CHECK-SAME: %[[INDICES_TENSOR:.*]]: tensor<3x1xi32>,
 // CHECK-SAME: %[[UPDATES_TENSOR:.*]]: tensor<3xi32>) -> tensor<8xi32> {
-// CHECK-DAG: %[[UPDATES_MEMREF:.*]] = bufferization.to_memref %[[UPDATES_TENSOR]] : tensor<3xi32> to memref<3xi32>
-// CHECK-DAG: %[[INDICES_MEMREF:.*]] = bufferization.to_memref %[[INDICES_TENSOR]] : tensor<3x1xi32> to memref<3x1xi32>
-// CHECK-DAG: %[[ORIG_MEMREF:.*]] = bufferization.to_memref %[[ORIG_TENSOR]] : tensor<8xi32> to memref<8xi32>
+// CHECK-DAG: %[[UPDATES_MEMREF:.*]] = bufferization.to_buffer %[[UPDATES_TENSOR]] : tensor<3xi32> to memref<3xi32>
+// CHECK-DAG: %[[INDICES_MEMREF:.*]] = bufferization.to_buffer %[[INDICES_TENSOR]] : tensor<3x1xi32> to memref<3x1xi32>
+// CHECK-DAG: %[[ORIG_MEMREF:.*]] = bufferization.to_buffer %[[ORIG_TENSOR]] : tensor<8xi32> to memref<8xi32>
 // CHECK-DAG: %[[ORIG_MEMREF_NEW:.*]] = memref.alloc() : memref<8xi32>
 // CHECK-DAG: %[[OUT_TENSOR:.*]] = bufferization.to_tensor %[[ORIG_MEMREF_NEW]] : memref<8xi32> to tensor<8xi32>
 // CHECK: memref.copy %[[ORIG_MEMREF]], %[[ORIG_MEMREF_NEW]] : memref<8xi32> to memref<8xi32>

@@ -87,9 +87,9 @@ func.func @scatter_update_scalar_1D(
 // CHECK-SAME: %[[ORIG_TENSOR:.*]]: tensor<8xi32>,
 // CHECK-SAME: %[[INDICES_TENSOR:.*]]: tensor<3x1xi32>,
 // CHECK-SAME: %[[UPDATES_TENSOR:.*]]: tensor<3xi32>) -> tensor<8xi32> {
-// CHECK-DAG: %[[UPDATES_MEMREF:.*]] = bufferization.to_memref %[[UPDATES_TENSOR]] : tensor<3xi32> to memref<3xi32>
-// CHECK-DAG: %[[INDICES_MEMREF:.*]] = bufferization.to_memref %[[INDICES_TENSOR]] : tensor<3x1xi32> to memref<3x1xi32>
-// CHECK-DAG: %[[ORIG_MEMREF:.*]] = bufferization.to_memref %[[ORIG_TENSOR]] : tensor<8xi32> to memref<8xi32>
+// CHECK-DAG: %[[UPDATES_MEMREF:.*]] = bufferization.to_buffer %[[UPDATES_TENSOR]] : tensor<3xi32> to memref<3xi32>
+// CHECK-DAG: %[[INDICES_MEMREF:.*]] = bufferization.to_buffer %[[INDICES_TENSOR]] : tensor<3x1xi32> to memref<3x1xi32>
+// CHECK-DAG: %[[ORIG_MEMREF:.*]] = bufferization.to_buffer %[[ORIG_TENSOR]] : tensor<8xi32> to memref<8xi32>
 // CHECK-DAG: %[[ORIG_MEMREF_NEW:.*]] = memref.alloc() : memref<8xi32>
 // CHECK-DAG: %[[OUT_TENSOR:.*]] = bufferization.to_tensor %[[ORIG_MEMREF_NEW]] : memref<8xi32> to tensor<8xi32>
 // CHECK: memref.copy %[[ORIG_MEMREF]], %[[ORIG_MEMREF_NEW]] : memref<8xi32> to memref<8xi32>

test/RefBackend/mlprogram-bufferize.mlir

Lines changed: 1 addition & 1 deletion

@@ -9,7 +9,7 @@
 // CHECK: %[[NEXT_SEED:.*]] = arith.muli %[[SEED]], %[[CST127]] : i64
 // CHECK: %[[INSERTED:.*]] = tensor.insert %[[NEXT_SEED]] into %[[TENSOR]][] : tensor<i64>
 // CHECK: %[[GLOBAL_SEED_1:.*]] = memref.get_global @global_seed : memref<i64>
-// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[INSERTED]] : tensor<i64> to memref<i64>
+// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[INSERTED]] : tensor<i64> to memref<i64>
 // CHECK: memref.copy %[[MEMREF]], %[[GLOBAL_SEED_1]] : memref<i64> to memref<i64>
 // CHECK: return %[[NEXT_SEED]] : i64
 module {
