diff --git a/conf.py b/conf.py
index f0f4905844..e4bca1ac7f 100644
--- a/conf.py
+++ b/conf.py
@@ -67,6 +67,12 @@
 #
 # needs_sphinx = '1.0'
 
+html_meta = {
+    'description': 'Master PyTorch with our step-by-step tutorials for all skill levels. Start your journey to becoming a PyTorch expert today!',
+    'keywords': 'PyTorch, tutorials, Getting Started, deep learning, AI',
+    'author': 'PyTorch Contributors'
+}
+
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
diff --git a/index.rst b/index.rst
index 91517834fd..95c4a8f3ef 100644
--- a/index.rst
+++ b/index.rst
@@ -3,6 +3,7 @@ Welcome to PyTorch Tutorials
 
 **What's new in PyTorch tutorials?**
 
+* `torch.export AOTInductor Tutorial for Python runtime (Beta) `__
 * `A guide on good usage of non_blocking and pin_memory() in PyTorch `__
 * `Introduction to Distributed Pipeline Parallelism `__
 * `Introduction to Libuv TCPStore Backend `__
diff --git a/recipes_source/torch_export_aoti_python.py b/recipes_source/torch_export_aoti_python.py
index 136862078c..312491b660 100644
--- a/recipes_source/torch_export_aoti_python.py
+++ b/recipes_source/torch_export_aoti_python.py
@@ -1,7 +1,11 @@
 # -*- coding: utf-8 -*-
 """
-(Beta) ``torch.export`` AOTInductor Tutorial for Python runtime
+.. meta::
+   :description: An end-to-end example of how to use AOTInductor for Python runtime.
+   :keywords: torch.export, AOTInductor, torch._inductor.aot_compile, torch._export.aot_load
+
+``torch.export`` AOTInductor Tutorial for Python runtime (Beta)
 ===============================================================
 **Author:** Ankith Gunapal, Bin Bao, Angela Yi
 """
@@ -18,7 +22,7 @@
 # a shared library that can be run in a non-Python environment.
 #
 #
-# In this tutorial, you will learn an end-to-end example of how to use AOTInductor for python runtime.
+# In this tutorial, you will learn an end-to-end example of how to use AOTInductor for Python runtime.
 # We will look at how to use :func:`torch._inductor.aot_compile` along with :func:`torch.export.export` to generate a
 # shared library. Additionally, we will examine how to execute the shared library in Python runtime using :func:`torch._export.aot_load`.
 # You will learn about the speed up seen in the first inference time using AOTInductor, especially when using
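
Note (not part of the patch): the recipe's introduction names the two private entry points this tutorial revolves around, torch._inductor.aot_compile and torch._export.aot_load. Below is a minimal sketch of that end-to-end flow for reference while reviewing; the toy model, shapes, and "model.so" output path are illustrative assumptions rather than code from the recipe, and a CUDA device is assumed.

# Sketch of the workflow the recipe documents; names below marked as
# assumptions are not taken from the patch itself.
import os
import torch

class M(torch.nn.Module):
    # Hypothetical toy model, stand-in for the recipe's example.
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(10, 1)

    def forward(self, x):
        return torch.sigmoid(self.fc(x))

model = M().cuda()
example_inputs = (torch.randn(8, 10, device="cuda"),)

# Export the model to an ExportedProgram, then AOT-compile the resulting
# graph module into a shared library on disk.
exported = torch.export.export(model, example_inputs)
so_path = torch._inductor.aot_compile(
    exported.module(),
    example_inputs,
    # Output path is an assumption for this sketch.
    options={"aot_inductor.output_path": os.path.join(os.getcwd(), "model.so")},
)

# Load the shared library back into a Python runtime and run inference;
# kernels were compiled ahead of time, which is the source of the
# first-inference speedup the recipe mentions.
loaded = torch._export.aot_load(so_path, device="cuda")
with torch.inference_mode():
    out = loaded(*example_inputs)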