Enable mypy lintrunner, Part 2 (codegen/*, docs/*) #7493

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged (1 commit, Jan 3, 2025)
4 changes: 2 additions & 2 deletions .lintrunner.toml

```diff
@@ -292,9 +292,9 @@ include_patterns = [
     # TODO(https://github.com/pytorch/executorch/issues/7441): Gradually start enabling all folders.
     # 'backends/**/*.py',
     'build/**/*.py',
-    # 'codegen/**/*.py',
+    'codegen/**/*.py',
     # 'devtools/**/*.py',
-    # 'docs/**/*.py',
+    'docs/**/*.py',
     # 'examples/**/*.py',
    # 'exir/**/*.py',
     # 'extension/**/*.py',
```
33 changes: 24 additions & 9 deletions .mypy.ini

```diff
@@ -10,9 +10,6 @@ local_partial_types = True
 enable_error_code = possibly-undefined
 warn_unused_ignores = False
 
-# TODO(https://github.com/pytorch/executorch/issues/7441): Remove this
-# disable_error_code = import-untyped
-
 files =
     backends,
     codegen,
@@ -31,35 +28,53 @@ mypy_path = executorch
 [mypy-executorch.codegen.*]
 follow_untyped_imports = True
 
-[mypy-executorch.extension.*]
+[mypy-executorch.devtools.*]
 follow_untyped_imports = True
 
+[mypy-executorch.exir.*]
+follow_untyped_imports = True
+
+[mypy-executorch.extension.*]
+follow_untyped_imports = True
+
 [mypy-executorch.kernels.*]
 follow_untyped_imports = True
 
 [mypy-executorch.runtime.*]
 follow_untyped_imports = True
 
+[mypy-requests.*]
+follow_untyped_imports = True
+
 [mypy-torchgen.*]
 follow_untyped_imports = True
 
-[mypy-setuptools.*]
+[mypy-buck_util]
 ignore_missing_imports = True
 
-[mypy-buck_util]
+[mypy-docutils.*]
 ignore_missing_imports = True
 
-[mypy-tomllib]
+[mypy-pandas]
 ignore_missing_imports = True
 
-[mypy-zstd]
+[mypy-pytorch_sphinx_theme]
 ignore_missing_imports = True
 
+[mypy-ruamel]
+ignore_missing_imports = True
+
+[mypy-setuptools.*]
+ignore_missing_imports = True
+
+[mypy-sphinx.*]
+ignore_missing_imports = True
+
+[mypy-tomllib]
+ignore_missing_imports = True
+
+[mypy-yaml]
+ignore_missing_imports = True
+
-[mypy-ruamel]
+[mypy-zstd]
 ignore_missing_imports = True
```
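
For readers skimming the .mypy.ini changes: the two per-module options do different things. `follow_untyped_imports = True` tells mypy to resolve and analyze a dependency that ships no type stubs, while `ignore_missing_imports = True` silences the missing-stub error and types the module as `Any`. A minimal sketch of the practical difference, using the `requests` override added above (illustrative only, not code from this PR):

```python
import requests

# follow_untyped_imports = True: mypy analyzes the untyped requests
# source, so attribute lookups like Session() are still checked, and a
# typo such as requests.Sesion would be reported as a missing attribute.
session = requests.Session()
session.close()

# ignore_missing_imports = True (used above for zstd, yaml, pandas, ...)
# would instead type the whole module as Any, and the same typo would
# pass the type check silently.
```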
5 changes: 3 additions & 2 deletions docs/source/conf.py

```diff
@@ -22,6 +22,7 @@
 import glob
 import os
 import sys
+from typing import Any
 
 import pytorch_sphinx_theme
 
@@ -103,7 +104,7 @@
 
 myst_heading_anchors = 4
 
-sphinx_gallery_conf = {
+sphinx_gallery_conf: dict[str, Any] = {
     "examples_dirs": ["tutorials_source"],
     "ignore_pattern": "template_tutorial.py",
     "gallery_dirs": ["tutorials"],
@@ -197,7 +198,7 @@
     SupportedDevices,
     SupportedProperties,
 )
-from docutils.parsers import rst
+from docutils.parsers import rst  # type: ignore[import-untyped]
 
 # Register custom directives
```
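
Two small notes on the conf.py changes. The `dict[str, Any]` annotation is needed because mypy infers a value type from the dict literal (joining `list[str]`, `str`, and so on), and then rejects later entries or assignments of other value types; `Any` opts the values out of that check. The inline `# type: ignore[import-untyped]` suppresses the missing-stubs diagnostic for docutils at one import site instead of repo-wide. A simplified sketch of the first point (stdlib only, not the full conf.py):

```python
from typing import Any

# Without the annotation, mypy joins the literal's value types (here
# list[str] and str) and flags writes of any other value type later on:
sphinx_gallery_conf: dict[str, Any] = {
    "examples_dirs": ["tutorials_source"],  # list[str]
    "ignore_pattern": "template_tutorial.py",  # str
}
sphinx_gallery_conf["download_all_examples"] = False  # fine with Any values
```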
4 changes: 2 additions & 2 deletions docs/source/custom_directives.py

```diff
@@ -102,7 +102,7 @@ class SupportedDevices(BaseShield):
     required_arguments = 1
     final_argument_whitespace = True
 
-    def run(self) -> List[nodes.Node]:
+    def run(self, params, alt, _) -> List[nodes.Node]:
         devices = _parse_devices(self.arguments[0])
         alt = f"This feature supports the following devices: {devices}"
         params = {
@@ -121,7 +121,7 @@ class SupportedProperties(BaseShield):
     required_arguments = 1
     final_argument_whitespace = True
 
-    def run(self) -> List[nodes.Node]:
+    def run(self, params, alt, _) -> List[nodes.Node]:
         properties = _parse_properties(self.arguments[0])
         alt = f"This API supports the following properties: {properties}"
         params = {
```
2 changes: 1 addition & 1 deletion docs/source/executorch_custom_versions.py

```diff
@@ -24,7 +24,7 @@
     "pytorch.txt",
 ]
 
-variables = {}
+variables: dict[str, str] = {}
 
 
 def read_version_files():
```
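
The annotation on the empty dict is required rather than cosmetic: mypy cannot infer element types from `{}`, and with `local_partial_types = True` (set at the top of .mypy.ini above) the partial type cannot be completed from another scope, so mypy reports `Need type annotation` [var-annotated]. A minimal sketch; the key and value below are hypothetical, not from the real file:

```python
# Without the annotation: error: Need type annotation for "variables"
# (hint: "variables: dict[<type>, <type>] = ...")  [var-annotated]
variables: dict[str, str] = {}


def read_version_files() -> None:
    # Writes from another scope cannot complete a module-level partial
    # type when local_partial_types = True, hence the annotation above.
    variables["VERSION_FILE"] = "version.txt"  # hypothetical entry
```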
```diff
@@ -232,7 +232,7 @@ def forward(self, x):
 # Via EventBlocks
 for event in event_block.events:
     if event.name == "native_call_addmm.out":
-        print(event.name, event.perf_data.raw)
+        print(event.name, event.perf_data.raw if event.perf_data else "")
 
 # Via Dataframe
 df = event_block.to_dataframe()
@@ -264,11 +264,12 @@ def forward(self, x):
 df = df[df.event_name == "native_call_convolution.out"]
 if len(df) > 0:
     slowest = df.loc[df["p50"].idxmax()]
-    print(slowest.event_name)
+    assert slowest
+    print(slowest.name)
     print()
-    pp.pprint(slowest.stack_traces)
+    pp.pprint(slowest.stack_traces if slowest.stack_traces else "")
     print()
-    pp.pprint(slowest.module_hierarchy)
+    pp.pprint(slowest.module_hierarchy if slowest.module_hierarchy else "")
 
 ######################################################################
 # If a user wants the total runtime of a module, they can use
```
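
These tutorial edits are all the same fix: `perf_data`, `stack_traces`, and `module_hierarchy` can be `None`, so dereferencing them unguarded trips mypy's `union-attr` check, and the diff narrows each access with a conditional. A self-contained sketch of the pattern; `Event` and `PerfData` here are stand-ins, not the real devtools classes:

```python
from typing import Optional


class PerfData:
    def __init__(self, raw: list[float]) -> None:
        self.raw = raw


class Event:
    def __init__(self, name: str, perf_data: Optional[PerfData]) -> None:
        self.name = name
        self.perf_data = perf_data


def describe(event: Event) -> None:
    # A bare event.perf_data.raw fails: Item "None" of "Optional[PerfData]"
    # has no attribute "raw"  [union-attr]. The conditional narrows it.
    print(event.name, event.perf_data.raw if event.perf_data else "")


describe(Event("native_call_addmm.out", PerfData([0.1, 0.2])))
describe(Event("native_call_convolution.out", None))
```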
81 changes: 30 additions & 51 deletions docs/source/tutorials_source/export-to-executorch-tutorial.py

```diff
@@ -65,7 +65,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         return self.relu(a)
 
 
-example_args = (torch.randn(1, 3, 256, 256),)
+example_args: tuple[torch.Tensor] = (torch.randn(1, 3, 256, 256),)
 aten_dialect: ExportedProgram = export(SimpleConv(), example_args, strict=True)
 print(aten_dialect)
 
@@ -100,8 +100,11 @@ def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
         return x + y
 
 
-example_args = (torch.randn(3, 3), torch.randn(3, 3))
-aten_dialect: ExportedProgram = export(Basic(), example_args, strict=True)
+example_args_2: tuple[torch.Tensor, torch.Tensor] = (
+    torch.randn(3, 3),
+    torch.randn(3, 3),
+)
+aten_dialect = export(Basic(), example_args_2, strict=True)
 
 # Works correctly
 print(aten_dialect.module()(torch.ones(3, 3), torch.ones(3, 3)))
@@ -118,20 +121,11 @@ def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
 
 from torch.export import Dim
 
-
-class Basic(torch.nn.Module):
-    def __init__(self):
-        super().__init__()
-
-    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
-        return x + y
-
-
-example_args = (torch.randn(3, 3), torch.randn(3, 3))
+example_args_2 = (torch.randn(3, 3), torch.randn(3, 3))
 dim1_x = Dim("dim1_x", min=1, max=10)
 dynamic_shapes = {"x": {1: dim1_x}, "y": {1: dim1_x}}
-aten_dialect: ExportedProgram = export(
-    Basic(), example_args, dynamic_shapes=dynamic_shapes, strict=True
+aten_dialect = export(
+    Basic(), example_args_2, dynamic_shapes=dynamic_shapes, strict=True
 )
 print(aten_dialect)
 
@@ -207,13 +201,13 @@ def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
 )
 
 quantizer = XNNPACKQuantizer().set_global(get_symmetric_quantization_config())
-prepared_graph = prepare_pt2e(pre_autograd_aten_dialect, quantizer)
+prepared_graph = prepare_pt2e(pre_autograd_aten_dialect, quantizer)  # type: ignore[arg-type]
 # calibrate with a sample dataset
 converted_graph = convert_pt2e(prepared_graph)
 print("Quantized Graph")
 print(converted_graph)
 
-aten_dialect: ExportedProgram = export(converted_graph, example_args, strict=True)
+aten_dialect = export(converted_graph, example_args, strict=True)
 print("ATen Dialect Graph")
 print(aten_dialect)
 
@@ -243,7 +237,7 @@ def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
 from executorch.exir import EdgeProgramManager, to_edge
 
 example_args = (torch.randn(1, 3, 256, 256),)
-aten_dialect: ExportedProgram = export(SimpleConv(), example_args, strict=True)
+aten_dialect = export(SimpleConv(), example_args, strict=True)
 
 edge_program: EdgeProgramManager = to_edge(aten_dialect)
 print("Edge Dialect Graph")
@@ -272,9 +266,7 @@ def forward(self, x):
 decode_args = (torch.randn(1, 5),)
 aten_decode: ExportedProgram = export(Decode(), decode_args, strict=True)
 
-edge_program: EdgeProgramManager = to_edge(
-    {"encode": aten_encode, "decode": aten_decode}
-)
+edge_program = to_edge({"encode": aten_encode, "decode": aten_decode})
 for method in edge_program.methods:
     print(f"Edge Dialect graph of {method}")
     print(edge_program.exported_program(method))
@@ -291,8 +283,8 @@ def forward(self, x):
 # rather than the ``torch.ops.aten`` namespace.
 
 example_args = (torch.randn(1, 3, 256, 256),)
-aten_dialect: ExportedProgram = export(SimpleConv(), example_args, strict=True)
-edge_program: EdgeProgramManager = to_edge(aten_dialect)
+aten_dialect = export(SimpleConv(), example_args, strict=True)
+edge_program = to_edge(aten_dialect)
 print("Edge Dialect Graph")
 print(edge_program.exported_program())
 
@@ -357,8 +349,8 @@ def forward(self, x):
 
 # Export and lower the module to Edge Dialect
 example_args = (torch.ones(1),)
-aten_dialect: ExportedProgram = export(LowerableModule(), example_args, strict=True)
-edge_program: EdgeProgramManager = to_edge(aten_dialect)
+aten_dialect = export(LowerableModule(), example_args, strict=True)
+edge_program = to_edge(aten_dialect)
 to_be_lowered_module = edge_program.exported_program()
 
 from executorch.exir.backend.backend_api import LoweredBackendModule, to_backend
@@ -369,7 +361,7 @@ def forward(self, x):
 )
 
 # Lower the module
-lowered_module: LoweredBackendModule = to_backend(
+lowered_module: LoweredBackendModule = to_backend(  # type: ignore[call-arg]
     "BackendWithCompilerDemo", to_be_lowered_module, []
 )
 print(lowered_module)
@@ -423,8 +415,8 @@ def forward(self, x):
 
 
 example_args = (torch.ones(1),)
-aten_dialect: ExportedProgram = export(ComposedModule(), example_args, strict=True)
-edge_program: EdgeProgramManager = to_edge(aten_dialect)
+aten_dialect = export(ComposedModule(), example_args, strict=True)
+edge_program = to_edge(aten_dialect)
 exported_program = edge_program.exported_program()
 print("Edge Dialect graph")
 print(exported_program)
@@ -460,16 +452,16 @@ def forward(self, a, x, b):
         return z
 
 
-example_args = (torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2))
-aten_dialect: ExportedProgram = export(Foo(), example_args, strict=True)
-edge_program: EdgeProgramManager = to_edge(aten_dialect)
+example_args_3 = (torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2))
+aten_dialect = export(Foo(), example_args_3, strict=True)
+edge_program = to_edge(aten_dialect)
 exported_program = edge_program.exported_program()
 print("Edge Dialect graph")
 print(exported_program)
 
 from executorch.exir.backend.test.op_partitioner_demo import AddMulPartitionerDemo
 
-delegated_program = to_backend(exported_program, AddMulPartitionerDemo())
+delegated_program = to_backend(exported_program, AddMulPartitionerDemo())  # type: ignore[call-arg]
 print("Delegated program")
 print(delegated_program)
 print(delegated_program.graph_module.lowered_module_0.original_module)
@@ -484,19 +476,9 @@ def forward(self, a, x, b):
 # call ``to_backend`` on it:
 
 
-class Foo(torch.nn.Module):
-    def forward(self, a, x, b):
-        y = torch.mm(a, x)
-        z = y + b
-        a = z - a
-        y = torch.mm(a, x)
-        z = y + b
-        return z
-
-
-example_args = (torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2))
-aten_dialect: ExportedProgram = export(Foo(), example_args, strict=True)
-edge_program: EdgeProgramManager = to_edge(aten_dialect)
+example_args_3 = (torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2))
+aten_dialect = export(Foo(), example_args_3, strict=True)
+edge_program = to_edge(aten_dialect)
 exported_program = edge_program.exported_program()
 delegated_program = edge_program.to_backend(AddMulPartitionerDemo())
 
@@ -530,7 +512,6 @@ def forward(self, a, x, b):
 print("ExecuTorch Dialect")
 print(executorch_program.exported_program())
 
-import executorch.exir as exir
 
 ######################################################################
 # Notice that in the graph we now see operators like ``torch.ops.aten.sub.out``
@@ -577,13 +558,11 @@ def forward(self, x):
 pre_autograd_aten_dialect = export_for_training(M(), example_args).module()
 # Optionally do quantization:
 # pre_autograd_aten_dialect = convert_pt2e(prepare_pt2e(pre_autograd_aten_dialect, CustomBackendQuantizer))
-aten_dialect: ExportedProgram = export(
-    pre_autograd_aten_dialect, example_args, strict=True
-)
-edge_program: exir.EdgeProgramManager = exir.to_edge(aten_dialect)
+aten_dialect = export(pre_autograd_aten_dialect, example_args, strict=True)
+edge_program = to_edge(aten_dialect)
 # Optionally do delegation:
 # edge_program = edge_program.to_backend(CustomBackendPartitioner)
-executorch_program: exir.ExecutorchProgramManager = edge_program.to_executorch(
+executorch_program = edge_program.to_executorch(
     ExecutorchBackendConfig(
         passes=[],  # User-defined passes
     )
```
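
Most of the churn in this tutorial follows from two mypy rules: a name annotated once cannot be re-annotated in the same scope, and reassigning it to a value of a different type is flagged as well. That is why the repeated `: ExportedProgram` annotations are dropped and reused argument tuples become `example_args_2` / `example_args_3`. A standalone sketch of the diagnostics being avoided (illustrative, not tutorial code):

```python
import torch

example_args: tuple[torch.Tensor] = (torch.randn(1, 3, 256, 256),)

# Re-annotating the same name is rejected by mypy:
#     example_args: tuple[torch.Tensor, torch.Tensor] = ...
#     error: Name "example_args" already defined  [no-redef]

# Renaming sidesteps both the re-annotation and the type change:
example_args_2: tuple[torch.Tensor, torch.Tensor] = (
    torch.randn(3, 3),
    torch.randn(3, 3),
)
```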
1 change: 1 addition & 0 deletions setup.py

```diff
@@ -710,6 +710,7 @@ def get_ext_modules() -> List[Extension]:
         # include. See also setuptools/discovery.py for custom finders.
         package_dir={
             "executorch/backends": "backends",
+            "executorch/codegen": "codegen",
             # TODO(mnachin T180504136): Do not put examples/models
             # into core pip packages. Refactor out the necessary utils
             # or core models files into a separate package.
```
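
The setup.py line makes the newly linted `codegen/` sources ship in the pip package: `package_dir` maps an installed package path to its source directory, so `import executorch.codegen` resolves from the wheel. A stripped-down sketch of the idiom, keeping the slash-style keys this setup.py already uses (not the full executorch setup):

```python
from setuptools import setup

setup(
    name="executorch",
    packages=["executorch.backends", "executorch.codegen"],
    # Map installed package paths to their in-repo source directories.
    package_dir={
        "executorch/backends": "backends",
        "executorch/codegen": "codegen",
    },
)
```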