Skip to content

add INC_PT_ONLY and INC_TF_ONLY #2202

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 6 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions docs/source/installation_guide.md
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,24 @@ pip install neural-compressor-tf
[optional] pip install -r requirements_tf.txt # for TensorFlow framework extension API
```

- [PyTorch only] Install from source with only the PyTorch 3.x extension API (skips TensorFlow-related modules):
```Shell
git clone https://github.com/intel/neural-compressor.git
cd neural-compressor
pip install -r requirements_pt.txt # for PyTorch framework extension API
export INC_PT_ONLY=1
pip install -e .
```

- [TensorFlow only] Install from source with only the TensorFlow 3.x extension API (skips PyTorch-related modules):
```Shell
git clone https://github.com/intel/neural-compressor.git
cd neural-compressor
pip install -r requirements_tf.txt # for TensorFlow framework extension API
export INC_TF_ONLY=1
pip install -e .
```

### Install from AI Kit

The Intel® Neural Compressor library is released as part of the [Intel® oneAPI AI Analytics Toolkit](https://software.intel.com/content/www/us/en/develop/tools/oneapi/ai-analytics-toolkit.html) (AI Kit). The AI Kit provides a consolidated package of Intel's latest deep learning and machine optimizations all in one place for ease of development. Along with Neural Compressor, the AI Kit includes Intel-optimized versions of deep learning frameworks (such as TensorFlow and PyTorch) and high-performing Python libraries to streamline end-to-end data science and AI workflows on Intel architectures.
Expand Down
28 changes: 15 additions & 13 deletions neural_compressor/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,16 +17,18 @@
"""Intel® Neural Compressor: An open-source Python library supporting popular model compression techniques."""
from .version import __version__

# we need to set a global 'NA' backend, or Model can't be used
from .config import (
DistillationConfig,
PostTrainingQuantConfig,
WeightPruningConfig,
QuantizationAwareTrainingConfig,
MixedPrecisionConfig,
)
from .contrib import *
from .model import *
from .metric import *
from .utils import options
from .utils.utility import set_random_seed, set_tensorboard, set_workspace, set_resume_from
import os


def _env_flag(name):
    """Return True when the environment variable *name* is set to a truthy value.

    A plain ``os.environ.get(name, False)`` is buggy here: environment values
    are strings, so ``INC_PT_ONLY=0`` would be the truthy string ``"0"`` and
    silently enable the slimmed import path. Treat "", "0", "false", "off"
    and "no" (case-insensitive) as unset.
    """
    return os.environ.get(name, "").strip().lower() not in ("", "0", "false", "off", "no")


# In PT-only / TF-only installs (INC_PT_ONLY / INC_TF_ONLY exported at build
# or import time) the heavy cross-framework 2.x modules are skipped; only the
# framework-specific 3.x API remains importable.
if not (_env_flag("INC_PT_ONLY") or _env_flag("INC_TF_ONLY")):
    from .config import (
        DistillationConfig,
        PostTrainingQuantConfig,
        WeightPruningConfig,
        QuantizationAwareTrainingConfig,
        MixedPrecisionConfig,
    )
    from .contrib import *
    from .model import *
    from .metric import *
    from .utils import options
    from .utils.utility import set_random_seed, set_tensorboard, set_workspace, set_resume_from
21 changes: 8 additions & 13 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,22 +95,17 @@ def get_build_version():


if __name__ == "__main__":
cfg_key = "neural_compressor"

# Temporary implementation of fp8 tensor saving and loading
# Will remove after Habana torch applies below patch:
# https://github.com/pytorch/pytorch/pull/114662
ext_modules = []
cmdclass = {}

if "pt" in sys.argv:
sys.argv.remove("pt")
if os.environ.get("INC_PT_ONLY", False) and os.environ.get("INC_TF_ONLY", False):
raise ValueError("Both INC_PT_ONLY and INC_TF_ONLY are set. Please set only one.")
if os.environ.get("INC_PT_ONLY", False):
cfg_key = "neural_compressor_pt"

if "tf" in sys.argv:
sys.argv.remove("tf")
elif os.environ.get("INC_TF_ONLY", False):
cfg_key = "neural_compressor_tf"
else:
cfg_key = "neural_compressor"

ext_modules = []
cmdclass = {}
project_name = PKG_INSTALL_CFG[cfg_key].get("project_name")
include_packages = PKG_INSTALL_CFG[cfg_key].get("include_packages") or {}
package_data = PKG_INSTALL_CFG[cfg_key].get("package_data") or {}
Expand Down
3 changes: 3 additions & 0 deletions test/3x/torch/algorithms/fp8_quant/conftest.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,15 @@
# Called once at the beginning of the test session
def pytest_sessionstart():
    """pytest hook: prepare the HPU/torch environment before any tests run.

    Executed once per session, before test collection imports the test
    modules, so the environment variable set below can still influence how
    neural_compressor is imported by those modules.
    """
    import os
    import habana_frameworks.torch.core as htcore
    import torch

    # Initialize the Habana (HPU) runtime for this process.
    htcore.hpu_set_env()

    # Use reproducible results
    torch.use_deterministic_algorithms(True)
    # Ensure that only 3x PyTorch part of INC is imported.
    # setdefault: respect a value the user already exported.
    # NOTE(review): assumes neural_compressor has not been imported yet at
    # this point — the flag is read at neural_compressor import time; confirm
    # no earlier plugin/conftest import pulls it in first.
    os.environ.setdefault("INC_PT_ONLY", "1")

    # Fix the seed - just in case
    torch.manual_seed(0)
2 changes: 2 additions & 0 deletions test/3x/torch/quantization/fp8_quant/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
# Ensure that the HPU is in lazy mode and weight sharing is disabled
os.environ.setdefault("PT_HPU_LAZY_MODE", "1")
os.environ.setdefault("PT_HPU_WEIGHT_SHARING", "0")
# Ensure that only 3x PyTorch part of INC is imported
os.environ.setdefault("INC_PT_ONLY", "1")


def pytest_sessionstart():
Expand Down
Loading