Skip to content

Commit

Permalink
Updated versions for black, usort, and ufmt, and reformatted codebase (#…
Browse files Browse the repository at this point in the history
  • Loading branch information
vfdev-5 authored Apr 28, 2023
1 parent 9d38754 commit 96433ce
Show file tree
Hide file tree
Showing 149 changed files with 40 additions and 821 deletions.
3 changes: 0 additions & 3 deletions .github/workflows/trigger_circle_ci.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,6 @@ def assert_pipeline_created(pipeline_id, headers):


def get_workflow_id(pipeline_id, headers):

while True:
result = requests.get(f"https://circleci.com/api/v2/pipeline/{pipeline_id}/workflow", headers=headers)
assert_result(result, 200)
Expand All @@ -59,7 +58,6 @@ def get_workflow_id(pipeline_id, headers):


def assert_workflows_successful(pipeline_id, headers):

workflow_id = get_workflow_id(pipeline_id, headers)

base_url = "https://app.circleci.com/pipelines/github/pytorch/ignite"
Expand All @@ -84,7 +82,6 @@ def assert_workflows_successful(pipeline_id, headers):


if __name__ == "__main__":

print("Trigger new pipeline on Circle-CI")

if "CIRCLE_TOKEN" not in os.environ:
Expand Down
6 changes: 3 additions & 3 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,12 +15,12 @@ repos:
exclude_types: ["python", "jupyter", "shell", "gitignore"]

- repo: https://github.com/omnilib/ufmt
rev: v1.3.1
rev: v2.1.0
hooks:
- id: ufmt
additional_dependencies:
- black == 21.12b0
- usort == 1.0.1
- black == 23.3.0
- usort == 1.0.6

- repo: https://github.com/pycqa/flake8
rev: 6.0.0
Expand Down
2 changes: 0 additions & 2 deletions docker/test_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@


def run_python_cmd(cmd):

try_except_cmd = f"""
import warnings
warnings.filterwarnings("ignore")
Expand Down Expand Up @@ -65,7 +64,6 @@ def main():


if __name__ == "__main__":

parser = argparse.ArgumentParser("Check docker image script")
parser.add_argument("image", type=str, help="Docker image to check")
args = parser.parse_args()
Expand Down
4 changes: 0 additions & 4 deletions examples/contrib/cifar10/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@


def training(local_rank, config):

rank = idist.get_rank()
manual_seed(config["seed"] + rank)
device = idist.device()
Expand Down Expand Up @@ -205,7 +204,6 @@ def run(
raise RuntimeError("The value of with_amp should be False if backend is xla")

with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:

parallel.run(training, config)


Expand Down Expand Up @@ -283,7 +281,6 @@ def log_basic_info(logger, config):


def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger):

device = idist.device()

# Setup Ignite trainer:
Expand All @@ -299,7 +296,6 @@ def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, con
scaler = GradScaler(enabled=with_amp)

def train_step(engine, batch):

x, y = batch[0], batch[1]

if x.device != device:
Expand Down
4 changes: 0 additions & 4 deletions examples/contrib/cifar10_qat/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@


def training(local_rank, config):

rank = idist.get_rank()
manual_seed(config["seed"] + rank)
device = idist.device()
Expand Down Expand Up @@ -189,7 +188,6 @@ def run(
spawn_kwargs["nproc_per_node"] = nproc_per_node

with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:

parallel.run(training, config)


Expand Down Expand Up @@ -267,7 +265,6 @@ def log_basic_info(logger, config):


def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger):

device = idist.device()

# Setup Ignite trainer:
Expand All @@ -283,7 +280,6 @@ def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, con
scaler = GradScaler(enabled=with_amp)

def train_step(engine, batch):

x, y = batch[0], batch[1]

if x.device != device:
Expand Down
5 changes: 0 additions & 5 deletions examples/contrib/transformers/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@


def training(local_rank, config):

rank = idist.get_rank()
manual_seed(config["seed"] + rank)
device = idist.device()
Expand All @@ -33,7 +32,6 @@ def training(local_rank, config):

output_path = config["output_dir"]
if rank == 0:

now = datetime.now().strftime("%Y%m%d-%H%M%S")
folder_name = f"{config['model']}_backend-{idist.backend()}-{idist.get_world_size()}_{now}"
output_path = Path(output_path) / folder_name
Expand Down Expand Up @@ -207,7 +205,6 @@ def run(
spawn_kwargs["nproc_per_node"] = nproc_per_node

with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:

parallel.run(training, config)


Expand Down Expand Up @@ -293,7 +290,6 @@ def log_basic_info(logger, config):


def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger):

device = idist.device()

# Setup Ignite trainer:
Expand All @@ -309,7 +305,6 @@ def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, con
scaler = GradScaler(enabled=with_amp)

def train_step(engine, batch):

input_batch = batch[0]
labels = batch[1].view(-1, 1)

Expand Down
1 change: 0 additions & 1 deletion examples/fast_neural_style/neural_style.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,6 @@ def train(args):
running_avgs = OrderedDict()

def step(engine, batch):

x, _ = batch
x = x.to(device)

Expand Down
2 changes: 0 additions & 2 deletions examples/gan/dcgan.py
Original file line number Diff line number Diff line change
Expand Up @@ -207,7 +207,6 @@ def main(
alpha,
output_dir,
):

# seed
check_manual_seed(seed)

Expand Down Expand Up @@ -243,7 +242,6 @@ def get_noise():

# The main function, processing a batch of examples
def step(engine, batch):

# unpack the batch. It comes from a dataset, so we have <images, labels> pairs. Discard labels.
real, _ = batch
real = real.to(device)
Expand Down
2 changes: 0 additions & 2 deletions examples/references/classification/imagenet/dataflow.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@ def opencv_loader(path):


def get_dataloader(dataset, sampler=None, shuffle=False, limit_num_samples=None, **kwargs):

if limit_num_samples is not None:
g = torch.Generator().manual_seed(limit_num_samples)
indices = torch.randperm(len(dataset), generator=g)[:limit_num_samples]
Expand All @@ -38,7 +37,6 @@ def get_train_val_loaders(
limit_train_num_samples: Optional[int] = None,
limit_val_num_samples: Optional[int] = None,
) -> Tuple[DataLoader, DataLoader, DataLoader]:

train_ds = ImageFolder(
Path(root_path) / "train",
transform=lambda sample: train_transforms(image=sample)["image"],
Expand Down
5 changes: 0 additions & 5 deletions examples/references/classification/imagenet/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@


def training(local_rank, config, logger, with_clearml):

rank = idist.get_rank()
manual_seed(config.seed + local_rank)

Expand Down Expand Up @@ -305,7 +304,6 @@ def run_training(config_filepath, backend="nccl", with_clearml=True):
assert config_filepath.exists(), f"File '{config_filepath.as_posix()}' is not found"

with idist.Parallel(backend=backend) as parallel:

logger = setup_logger(name="ImageNet Training", distributed_rank=idist.get_rank())

config = ConfigObject(config_filepath)
Expand All @@ -327,7 +325,6 @@ def run_training(config_filepath, backend="nccl", with_clearml=True):


def get_model_weights(config, logger, with_clearml):

path = ""
if with_clearml:
from clearml import Model
Expand All @@ -352,7 +349,6 @@ def get_model_weights(config, logger, with_clearml):


def evaluation(local_rank, config, logger, with_clearml):

rank = idist.get_rank()
device = idist.device()
manual_seed(config.seed + local_rank)
Expand Down Expand Up @@ -428,5 +424,4 @@ def run_evaluation(config_filepath, backend="nccl", with_clearml=True):


if __name__ == "__main__":

fire.Fire({"training": run_training, "eval": run_evaluation})
1 change: 0 additions & 1 deletion examples/references/classification/imagenet/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@


def initialize(config):

device = idist.device()

model = config.model.to(device)
Expand Down
3 changes: 0 additions & 3 deletions examples/references/segmentation/pascal_voc2012/dataflow.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,6 @@ def __getitem__(self, index):


class VOCSegmentationOpencv(VOCSegmentation):

target_names = [
"background",
"aeroplane",
Expand Down Expand Up @@ -114,7 +113,6 @@ def get_train_noval_sbdataset(root_path, return_meta=False):


def get_dataloader(dataset, sampler=None, shuffle=False, limit_num_samples=None, **kwargs):

if limit_num_samples is not None:
g = torch.Generator().manual_seed(limit_num_samples)
indices = torch.randperm(len(dataset), generator=g)[:limit_num_samples]
Expand All @@ -135,7 +133,6 @@ def get_train_val_loaders(
limit_train_num_samples=None,
limit_val_num_samples=None,
):

train_ds = get_train_dataset(root_path)
val_ds = get_val_dataset(root_path)

Expand Down
5 changes: 0 additions & 5 deletions examples/references/segmentation/pascal_voc2012/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,6 @@ def download_datasets(output_path):


def training(local_rank, config, logger, with_clearml):

rank = idist.get_rank()
manual_seed(config.seed + local_rank)

Expand Down Expand Up @@ -342,7 +341,6 @@ def run_training(config_filepath, backend="nccl", with_clearml=True):
assert config_filepath.exists(), f"File '{config_filepath.as_posix()}' is not found"

with idist.Parallel(backend=backend) as parallel:

logger = setup_logger(name="Pascal-VOC12 Training", distributed_rank=idist.get_rank())

config = ConfigObject(config_filepath)
Expand All @@ -364,7 +362,6 @@ def run_training(config_filepath, backend="nccl", with_clearml=True):


def get_model_weights(config, logger, with_clearml):

path = ""
if with_clearml:
from clearml import Model
Expand All @@ -389,7 +386,6 @@ def get_model_weights(config, logger, with_clearml):


def evaluation(local_rank, config, logger, with_clearml):

rank = idist.get_rank()
device = idist.device()
manual_seed(config.seed + local_rank)
Expand Down Expand Up @@ -472,5 +468,4 @@ def run_evaluation(config_filepath, backend="nccl", with_clearml=True):


if __name__ == "__main__":

fire.Fire({"download": download_datasets, "training": run_training, "eval": run_evaluation})
1 change: 0 additions & 1 deletion examples/references/segmentation/pascal_voc2012/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@


def initialize(config):

device = idist.device()

model = config.model.to(device)
Expand Down
2 changes: 0 additions & 2 deletions examples/reinforcement_learning/actor_critic.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,6 @@ def finish_episode(policy, optimizer, gamma):


def main(env, args):

policy = Policy()
optimizer = optim.Adam(policy.parameters(), lr=3e-2)
timesteps = range(10000)
Expand Down Expand Up @@ -185,7 +184,6 @@ def should_finish_training():


if __name__ == "__main__":

parser = argparse.ArgumentParser(description="Ignite actor-critic example")
parser.add_argument("--gamma", type=float, default=0.99, metavar="G", help="discount factor (default: 0.99)")
parser.add_argument("--seed", type=int, default=543, metavar="N", help="random seed (default: 1)")
Expand Down
2 changes: 0 additions & 2 deletions examples/reinforcement_learning/reinforce.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,6 @@ def finish_episode(policy, optimizer, gamma):


def main(env, args):

policy = Policy()
optimizer = optim.Adam(policy.parameters(), lr=1e-2)
timesteps = range(10000)
Expand Down Expand Up @@ -123,7 +122,6 @@ def should_finish_training():


if __name__ == "__main__":

parser = argparse.ArgumentParser(description="PyTorch REINFORCE example")
parser.add_argument("--gamma", type=float, default=0.99, metavar="G", help="discount factor (default: 0.99)")
parser.add_argument("--seed", type=int, default=543, metavar="N", help="random seed (default: 543)")
Expand Down
2 changes: 0 additions & 2 deletions examples/siamese_network/siamese_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,6 @@ def forward_once(self, x):
return output

def forward(self, input1, input2, input3):

# pass the input through resnet
output1 = self.forward_once(input1)
output2 = self.forward_once(input2)
Expand Down Expand Up @@ -180,7 +179,6 @@ def calculate_loss(input1, input2):


def run(args, model, device, optimizer, train_loader, test_loader, lr_scheduler):

# using Triplet Margin Loss
criterion = nn.TripletMarginLoss(p=2, margin=2.8)

Expand Down
2 changes: 1 addition & 1 deletion examples/super_resolution/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ def __init__(self, upscale_factor):
self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))
self.conv4 = nn.Conv2d(32, upscale_factor**2, (3, 3), (1, 1), (1, 1))
self.pixel_shuffle = nn.PixelShuffle(upscale_factor)

self._initialize_weights()
Expand Down
1 change: 0 additions & 1 deletion ignite/base/mixins.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@


class Serializable:

_state_dict_all_req_keys: Tuple = ()
_state_dict_one_of_opt_keys: Tuple = ()

Expand Down
2 changes: 0 additions & 2 deletions ignite/contrib/engines/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,7 +176,6 @@ def _setup_common_training_handlers(
trainer.add_event_handler(Events.EPOCH_COMPLETED, empty_cuda_cache)

if to_save is not None:

if output_path is None and save_handler is None:
raise ValueError(
"If to_save argument is provided then output_path or save_handler arguments should be also defined"
Expand Down Expand Up @@ -242,7 +241,6 @@ def _setup_common_distrib_training_handlers(
save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
**kwargs: Any,
) -> None:

_setup_common_training_handlers(
trainer,
to_save=to_save,
Expand Down
Loading

0 comments on commit 96433ce

Please sign in to comment.