Skip to content

Commit 6fe685f

Browse files
Auto-format by https://ultralytics.com
1 parent ed9b85f commit 6fe685f

File tree

1 file changed

+31
-31
lines changed

1 file changed

+31
-31
lines changed

train.py

+31-31
Original file line numberDiff line numberDiff line change
@@ -95,12 +95,12 @@
9595
)
9696

9797
# version check
98-
#if torch.__version__.startswith("1.8"):
99-
# Autocast = torch.cuda.amp.autocast(enabled=amp)
100-
# GradScaler = torch.cuda.amp.GradScaler
101-
#else:
102-
# Autocast = torch.amp.autocast("cuda", enabled=amp)
103-
# GradScaler = torch.amp.GradScaler
98+
# if torch.__version__.startswith("1.8"):
99+
# Autocast = torch.cuda.amp.autocast(enabled=amp)
100+
# GradScaler = torch.cuda.amp.GradScaler
101+
# else:
102+
# Autocast = torch.amp.autocast("cuda", enabled=amp)
103+
# GradScaler = torch.amp.GradScaler
104104

105105
LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1)) # https://pytorch.org/docs/stable/elastic/run.html
106106
RANK = int(os.getenv("RANK", -1))
@@ -114,33 +114,33 @@ def train(hyp, opt, device, callbacks):
114114
model architecture, loss computation, and optimizer steps.
115115
116116
Args:
117-
hyp (str | dict): Path to the hyperparameters YAML file or a dictionary of hyperparameters.
118-
opt (argparse.Namespace): Parsed command-line arguments containing training options.
119-
device (torch.device): Device on which training occurs, e.g., 'cuda' or 'cpu'.
120-
callbacks (Callbacks): Callback functions for various training events.
117+
hyp (str | dict): Path to the hyperparameters YAML file or a dictionary of hyperparameters.
118+
opt (argparse.Namespace): Parsed command-line arguments containing training options.
119+
device (torch.device): Device on which training occurs, e.g., 'cuda' or 'cpu'.
120+
callbacks (Callbacks): Callback functions for various training events.
121121
122122
Returns:
123-
None
124-
#
125-
Models and datasets download automatically from the latest YOLOv5 release.
123+
None
124+
#
125+
Models and datasets download automatically from the latest YOLOv5 release.
126126
127127
Example:
128-
Single-GPU training:
129-
```bash
130-
$ python train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (recommended)
131-
$ python train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch
132-
```
133-
134-
Multi-GPU DDP training:
135-
```bash
136-
$ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml --weights
137-
yolov5s.pt --img 640 --device 0,1,2,3
138-
```
139-
140-
For more usage details, refer to:
141-
- Models: https://github.com/ultralytics/yolov5/tree/master/models
142-
- Datasets: https://github.com/ultralytics/yolov5/tree/master/data
143-
- Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
128+
Single-GPU training:
129+
```bash
130+
$ python train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (recommended)
131+
$ python train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch
132+
```
133+
134+
Multi-GPU DDP training:
135+
```bash
136+
$ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml --weights
137+
yolov5s.pt --img 640 --device 0,1,2,3
138+
```
139+
140+
For more usage details, refer to:
141+
- Models: https://github.com/ultralytics/yolov5/tree/master/models
142+
- Datasets: https://github.com/ultralytics/yolov5/tree/master/data
143+
- Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
144144
"""
145145
save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = (
146146
Path(opt.save_dir),
@@ -360,7 +360,7 @@ def lf(x):
360360
maps = np.zeros(nc) # mAP per class
361361
results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
362362
scheduler.last_epoch = start_epoch - 1 # do not move
363-
# scaler = GradScaler(enabled=amp)
363+
# scaler = GradScaler(enabled=amp)
364364
scaler = None
365365
if torch.__version__.startswith("1.8"):
366366
scaler = torch.cuda.amp.GradScaler(enabled=amp)
@@ -422,7 +422,7 @@ def lf(x):
422422
imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)
423423

424424
# Forward
425-
#with Autocast:
425+
# with Autocast:
426426
amp_autocast = None
427427
if torch.__version__.startswith("1.8"):
428428
amp_autocast = torch.cuda.amp.autocast(enabled=amp)

0 commit comments

Comments
 (0)