(transform): convert to csl_stencil.apply #2803

Merged
13 commits, merged Jul 5, 2024
50 changes: 28 additions & 22 deletions tests/filecheck/transforms/stencil-to-csl-stencil.mlir
@@ -1,4 +1,4 @@
// RUN: xdsl-opt %s -p "stencil-to-csl-stencil" | filecheck %s
// RUN: xdsl-opt %s -p "stencil-to-csl-stencil{num_chunks=2}" | filecheck %s

builtin.module {
func.func @gauss_seidel(%a : !stencil.field<[-1,1023]x[-1,511]xtensor<512xf32>>, %b : !stencil.field<[-1,1023]x[-1,511]xtensor<512xf32>>) {
@@ -37,27 +37,33 @@ builtin.module {
// CHECK-NEXT: func.func @gauss_seidel(%a : !stencil.field<[-1,1023]x[-1,511]xtensor<512xf32>>, %b : !stencil.field<[-1,1023]x[-1,511]xtensor<512xf32>>) {
// CHECK-NEXT: %0 = stencil.load %a : !stencil.field<[-1,1023]x[-1,511]xtensor<512xf32>> -> !stencil.temp<[-1,2]x[-1,2]xtensor<512xf32>>
// CHECK-NEXT: %1 = "csl_stencil.prefetch"(%0) <{"topo" = #dmp.topo<1022x510>, "swaps" = [#csl_stencil.exchange<to [1, 0]>, #csl_stencil.exchange<to [-1, 0]>, #csl_stencil.exchange<to [0, 1]>, #csl_stencil.exchange<to [0, -1]>]}> : (!stencil.temp<[-1,2]x[-1,2]xtensor<512xf32>>) -> memref<4xtensor<510xf32>>
-// CHECK-NEXT: %2 = stencil.apply(%3 = %0 : !stencil.temp<[-1,2]x[-1,2]xtensor<512xf32>>, %4 = %1 : memref<4xtensor<510xf32>>) -> (!stencil.temp<[0,1]x[0,1]xtensor<510xf32>>) {
-// CHECK-NEXT: %5 = arith.constant 1.666600e-01 : f32
-// CHECK-NEXT: %6 = csl_stencil.access %4[1, 0] : memref<4xtensor<510xf32>>
-// CHECK-NEXT: %7 = csl_stencil.access %4[-1, 0] : memref<4xtensor<510xf32>>
-// CHECK-NEXT: %8 = csl_stencil.access %3[0, 0] : !stencil.temp<[-1,2]x[-1,2]xtensor<512xf32>>
-// CHECK-NEXT: %9 = "tensor.extract_slice"(%8) <{"static_offsets" = array<i64: 1>, "static_sizes" = array<i64: 510>, "static_strides" = array<i64: 1>, "operandSegmentSizes" = array<i32: 1, 0, 0, 0>}> : (tensor<512xf32>) -> tensor<510xf32>
-// CHECK-NEXT: %10 = csl_stencil.access %3[0, 0] : !stencil.temp<[-1,2]x[-1,2]xtensor<512xf32>>
-// CHECK-NEXT: %11 = "tensor.extract_slice"(%10) <{"static_offsets" = array<i64: -1>, "static_sizes" = array<i64: 510>, "static_strides" = array<i64: 1>, "operandSegmentSizes" = array<i32: 1, 0, 0, 0>}> : (tensor<512xf32>) -> tensor<510xf32>
-// CHECK-NEXT: %12 = csl_stencil.access %4[0, 1] : memref<4xtensor<510xf32>>
-// CHECK-NEXT: %13 = csl_stencil.access %4[0, -1] : memref<4xtensor<510xf32>>
-// CHECK-NEXT: %14 = arith.addf %13, %12 : tensor<510xf32>
-// CHECK-NEXT: %15 = arith.addf %14, %11 : tensor<510xf32>
-// CHECK-NEXT: %16 = arith.addf %15, %9 : tensor<510xf32>
-// CHECK-NEXT: %17 = arith.addf %16, %7 : tensor<510xf32>
-// CHECK-NEXT: %18 = arith.addf %17, %6 : tensor<510xf32>
-// CHECK-NEXT: %19 = tensor.empty() : tensor<510xf32>
-// CHECK-NEXT: %20 = linalg.fill ins(%5 : f32) outs(%19 : tensor<510xf32>) -> tensor<510xf32>
-// CHECK-NEXT: %21 = arith.mulf %18, %20 : tensor<510xf32>
-// CHECK-NEXT: stencil.return %21 : tensor<510xf32>
-// CHECK-NEXT: }
-// CHECK-NEXT: stencil.store %2 to %b ([0, 0] : [1, 1]) : !stencil.temp<[0,1]x[0,1]xtensor<510xf32>> to !stencil.field<[-1,1023]x[-1,511]xtensor<512xf32>>
+// CHECK-NEXT: %2 = tensor.empty() : tensor<510xf32>
+// CHECK-NEXT: %3 = csl_stencil.apply(%0 : !stencil.temp<[-1,2]x[-1,2]xtensor<512xf32>>, %2 : tensor<510xf32>) -> (!stencil.temp<[0,1]x[0,1]xtensor<510xf32>>) ({
+// CHECK-NEXT: ^0(%4 : memref<4xtensor<255xf32>>, %5 : index, %6 : tensor<510xf32>):
+// CHECK-NEXT: %7 = csl_stencil.access %4[1, 0] : memref<4xtensor<255xf32>>
+// CHECK-NEXT: %8 = csl_stencil.access %4[-1, 0] : memref<4xtensor<255xf32>>
+// CHECK-NEXT: %9 = csl_stencil.access %4[0, 1] : memref<4xtensor<255xf32>>
+// CHECK-NEXT: %10 = csl_stencil.access %4[0, -1] : memref<4xtensor<255xf32>>
+// CHECK-NEXT: %11 = arith.addf %8, %7 : tensor<255xf32>
+// CHECK-NEXT: %12 = arith.addf %10, %9 : tensor<255xf32>
+// CHECK-NEXT: %13 = arith.addf %12, %11 : tensor<255xf32>
+// CHECK-NEXT: %14 = "tensor.insert_slice"(%13, %6, %5) <{"static_offsets" = array<i64: 0>, "static_sizes" = array<i64: 255>, "static_strides" = array<i64: 1>, "operandSegmentSizes" = array<i32: 1, 1, 1, 0, 0>}> : (tensor<255xf32>, tensor<510xf32>, index) -> tensor<510xf32>
+// CHECK-NEXT: csl_stencil.yield %14 : tensor<510xf32>
+// CHECK-NEXT: }, {
+// CHECK-NEXT: ^1(%15 : !stencil.temp<[-1,2]x[-1,2]xtensor<512xf32>>, %16 : tensor<510xf32>):
+// CHECK-NEXT: %17 = csl_stencil.access %15[0, 0] : !stencil.temp<[-1,2]x[-1,2]xtensor<512xf32>>
+// CHECK-NEXT: %18 = csl_stencil.access %15[0, 0] : !stencil.temp<[-1,2]x[-1,2]xtensor<512xf32>>
+// CHECK-NEXT: %19 = arith.constant 1.666600e-01 : f32
+// CHECK-NEXT: %20 = "tensor.extract_slice"(%17) <{"static_offsets" = array<i64: 1>, "static_sizes" = array<i64: 510>, "static_strides" = array<i64: 1>, "operandSegmentSizes" = array<i32: 1, 0, 0, 0>}> : (tensor<512xf32>) -> tensor<510xf32>
+// CHECK-NEXT: %21 = "tensor.extract_slice"(%18) <{"static_offsets" = array<i64: -1>, "static_sizes" = array<i64: 510>, "static_strides" = array<i64: 1>, "operandSegmentSizes" = array<i32: 1, 0, 0, 0>}> : (tensor<512xf32>) -> tensor<510xf32>
+// CHECK-NEXT: %22 = arith.addf %16, %21 : tensor<510xf32>
+// CHECK-NEXT: %23 = arith.addf %22, %20 : tensor<510xf32>
+// CHECK-NEXT: %24 = tensor.empty() : tensor<510xf32>
+// CHECK-NEXT: %25 = linalg.fill ins(%19 : f32) outs(%24 : tensor<510xf32>) -> tensor<510xf32>
+// CHECK-NEXT: %26 = arith.mulf %23, %25 : tensor<510xf32>
+// CHECK-NEXT: csl_stencil.yield %26 : tensor<510xf32>
+// CHECK-NEXT: })
+// CHECK-NEXT: stencil.store %3 to %b ([0, 0] : [1, 1]) : !stencil.temp<[0,1]x[0,1]xtensor<510xf32>> to !stencil.field<[-1,1023]x[-1,511]xtensor<512xf32>>
// CHECK-NEXT: func.return
// CHECK-NEXT: }
// CHECK-NEXT: }
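Note on the updated FileCheck expectations: with num_chunks=2, the 510-element z-dimension is communicated in two chunks of 255 elements each (510 / 2 = 255), which is why the receive-chunk region above operates on memref<4xtensor<255xf32>> and accumulates each chunk into a tensor<510xf32> via tensor.insert_slice. The option uses the usual xdsl-opt pass-option syntax from the RUN line, e.g. (input.mlir is a placeholder file name):

xdsl-opt input.mlir -p "stencil-to-csl-stencil{num_chunks=2}"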
10 changes: 5 additions & 5 deletions xdsl/dialects/csl/csl_stencil.py
@@ -116,24 +116,24 @@ class PrefetchOp(IRDLOperation):
        stencil.TempType[Attribute] | memref.MemRefType[Attribute]
    )

-    swaps = opt_prop_def(builtin.ArrayAttr[ExchangeDeclarationAttr])
+    swaps = prop_def(builtin.ArrayAttr[ExchangeDeclarationAttr])

-    topo = opt_prop_def(dmp.RankTopoAttr)
+    topo = prop_def(dmp.RankTopoAttr)

    result = result_def(memref.MemRefType)

    def __init__(
        self,
        input_stencil: SSAValue | Operation,
-        topo: dmp.RankTopoAttr | None = None,
-        swaps: Sequence[ExchangeDeclarationAttr] | None = None,
+        topo: dmp.RankTopoAttr,
+        swaps: Sequence[ExchangeDeclarationAttr],
        result_type: memref.MemRefType[Attribute] | None = None,
    ):
        super().__init__(
            operands=[input_stencil],
            properties={
                "topo": topo,
-                "swaps": builtin.ArrayAttr(swaps if swaps else []),
+                "swaps": builtin.ArrayAttr(swaps),
            },
            result_types=[result_type],
        )
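With topo and swaps now required rather than optional, every PrefetchOp construction site must pass them explicitly. A minimal sketch against the signature above; temp_val, my_topo, my_swaps, and res_type are hypothetical placeholders:

# Hedged sketch: `topo` and `swaps` can no longer be omitted or default to empty.
prefetch = PrefetchOp(
    temp_val,   # SSAValue | Operation producing the stencil.temp input
    my_topo,    # a dmp.RankTopoAttr
    my_swaps,   # a Sequence[ExchangeDeclarationAttr]
    result_type=res_type,  # optional memref.MemRefType
)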
37 changes: 37 additions & 0 deletions xdsl/dialects/tensor.py
@@ -241,6 +241,43 @@ class InsertSliceOp(IRDLOperation):

    irdl_options = [AttrSizedOperandSegments(as_property=True)]

+    @staticmethod
+    def get(
+        source: Operand,
+        dest: Operand,
+        static_sizes: Sequence[int],
+        static_offsets: Sequence[int] | None = None,
+        static_strides: Sequence[int] | None = None,
+        offsets: Sequence[Operand] | None = None,
+        sizes: Sequence[Operand] | None = None,
+        strides: Sequence[Operand] | None = None,
+        result_type: Attribute | None = None,
+    ) -> InsertSliceOp:
+
+        dims = len(static_sizes)
+        return InsertSliceOp.build(
+            operands=[
+                source,
+                dest,
+                offsets if offsets else [],
+                sizes if sizes else [],
+                strides if strides else [],
+            ],
+            properties={
+                "static_offsets": DenseArrayBase.from_list(
+                    i64, static_offsets if static_offsets else [0] * dims
+                ),
+                "static_sizes": DenseArrayBase.from_list(
+                    i64,
+                    static_sizes,
+                ),
+                "static_strides": DenseArrayBase.from_list(
+                    i64, static_strides if static_strides else [1] * dims
+                ),
+            },
+            result_types=[result_type if result_type else dest.type],
+        )
+
    @staticmethod
    def from_static_parameters(
        source: SSAValue | Operation,
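The new InsertSliceOp.get builder infers the rank from static_sizes, defaults static_offsets to all zeros and static_strides to all ones, and falls back to the destination's type for the result. A hedged usage sketch; chunk and acc are hypothetical SSA values (think of the tensor<255xf32> chunk and tensor<510xf32> accumulator in the test above):

# Builds a tensor.insert_slice of a 255-element slice at offset 0, stride 1;
# the result type defaults to acc's type (tensor<510xf32>).
op = InsertSliceOp.get(source=chunk, dest=acc, static_sizes=[255])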
77 changes: 62 additions & 15 deletions xdsl/transforms/experimental/stencil_tensorize_z_dimension.py
@@ -4,6 +4,7 @@
from attr import dataclass

from xdsl.context import MLContext
from xdsl.dialects import builtin
from xdsl.dialects.arith import (
Addf,
BinaryOperation,
@@ -14,13 +15,15 @@
)
from xdsl.dialects.builtin import (
AnyFloat,
ArrayAttr,
ContainerType,
IntAttr,
MemRefType,
ModuleOp,
ShapedType,
TensorType,
)
from xdsl.dialects.csl import csl_stencil
from xdsl.dialects.func import FuncOp
from xdsl.dialects.linalg import FillOp
from xdsl.dialects.stencil import (
@@ -35,7 +38,7 @@
StoreOp,
TempType,
)
-from xdsl.dialects.tensor import EmptyOp, ExtractSliceOp
+from xdsl.dialects.tensor import EmptyOp, ExtractSliceOp, InsertSliceOp
from xdsl.ir import (
Attribute,
Operation,
@@ -69,6 +72,15 @@ def get_required_result_type(op: Operation) -> TensorType[Attribute] | None:
return r_type
# abort when encountering an un-tensorized ReturnOp successor
return None
if (
isinstance(use.operation, InsertSliceOp)
and is_tensor(use.operation.result.type)
and isa(use.operation.static_sizes.data, ArrayAttr[IntAttr])
):
return TensorType(
use.operation.result.type.get_element_type(),
use.operation.static_sizes.data,
)
for ret in use.operation.results:
if isa(r_type := ret.type, TensorType[Attribute]):
return r_type
@@ -298,6 +310,28 @@ def match_and_rewrite(self, op: AccessOp, rewriter: PatternRewriter, /):
)


+class CslStencilAccessOpUpdateShape(RewritePattern):
+    """
+    Updates the result type of a tensorized `csl_stencil.access` op
+    """
+
+    @op_type_rewrite_pattern
+    def match_and_rewrite(self, op: csl_stencil.AccessOp, rewriter: PatternRewriter, /):
+        if typ := get_required_result_type(op):
+            if needs_update_shape(op.result.type, typ) and (
+                isa(op.op.type, TempType[TensorType[Attribute]])
+                or isa(op.op.type, MemRefType[TensorType[Attribute]])
+            ):
+                rewriter.replace_matched_op(
+                    csl_stencil.AccessOp(
+                        op.op,
+                        op.offset,
+                        op.op.type.get_element_type(),
+                        op.offset_mapping,
+                    )
+                )
+
+
class ExtractSliceOpUpdateShape(RewritePattern):
@op_type_rewrite_pattern
def match_and_rewrite(self, op: ExtractSliceOp, rewriter: PatternRewriter, /):
@@ -355,6 +389,32 @@ def match_and_rewrite(self, op: FillOp, rewriter: PatternRewriter, /):
)


+@dataclass(frozen=True)
+class BackpropagateStencilShapes(ModulePass):
+    """
+    Greedily back-propagates the result types of tensorized ops.
+    Use after creating/modifying tensorization.
+    """
+
+    name = "backpropagate-stencil-shapes"
+
+    def apply(self, ctx: MLContext, op: builtin.ModuleOp) -> None:
+        backpropagate_stencil_shapes = PatternRewriteWalker(
+            GreedyRewritePatternApplier(
+                [
+                    CslStencilAccessOpUpdateShape(),
+                    ExtractSliceOpUpdateShape(),
+                    EmptyOpUpdateShape(),
+                    FillOpUpdateShape(),
+                    ArithOpUpdateShape(),
+                ]
+            ),
+            walk_reverse=True,
+            apply_recursively=False,
+        )
+        backpropagate_stencil_shapes.rewrite_module(op)
+
+
@dataclass(frozen=True)
class StencilTensorizeZDimension(ModulePass):
name = "stencil-tensorize-z-dimension"
@@ -386,17 +446,4 @@ def apply(self, ctx: MLContext, op: ModuleOp) -> None:
            apply_recursively=False,
        )
        stencil_pass.rewrite_module(op)
-        backpropagate_stencil_shapes = PatternRewriteWalker(
-            GreedyRewritePatternApplier(
-                [
-                    # AccessOpUpdateShape(),
-                    ExtractSliceOpUpdateShape(),
-                    EmptyOpUpdateShape(),
-                    FillOpUpdateShape(),
-                    ArithOpUpdateShape(),
-                ]
-            ),
-            walk_reverse=True,
-            apply_recursively=False,
-        )
-        backpropagate_stencil_shapes.rewrite_module(op)
+        BackpropagateStencilShapes().apply(ctx=ctx, op=op)
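Extracting the back-propagation walker into BackpropagateStencilShapes (and adding the new CslStencilAccessOpUpdateShape pattern to it) means the shape fix-up can now run standalone as well as at the end of stencil-tensorize-z-dimension. A minimal sketch of invoking it programmatically, assuming ctx is an MLContext and module an already-parsed builtin.ModuleOp; whether the pass is also registered with the xdsl-opt driver under backpropagate-stencil-shapes is not shown in this diff:

from xdsl.transforms.experimental.stencil_tensorize_z_dimension import (
    BackpropagateStencilShapes,
)

# Runs only the greedy shape back-propagation patterns over the module.
BackpropagateStencilShapes().apply(ctx=ctx, op=module)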