)

# version check
-#if torch.__version__.startswith("1.8"):
-# Autocast = torch.cuda.amp.autocast(enabled=amp)
-# GradScaler = torch.cuda.amp.GradScaler
-#else:
-# Autocast = torch.amp.autocast("cuda", enabled=amp)
-# GradScaler = torch.amp.GradScaler
+# if torch.__version__.startswith("1.8"):
+# Autocast = torch.cuda.amp.autocast(enabled=amp)
+# GradScaler = torch.cuda.amp.GradScaler
+# else:
+# Autocast = torch.amp.autocast("cuda", enabled=amp)
+# GradScaler = torch.amp.GradScaler

LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1))  # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv("RANK", -1))
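The commented-out block touched by this hunk gates the AMP entry points on the PyTorch version. Uncommented, and with an assumed `amp` flag (train.py derives it from the selected device), it would read roughly as the sketch below; note that `torch.amp.GradScaler` only exists on recent 2.x builds, so this is illustrative rather than the PR's final code:

```python
# Sketch of the commented-out version check above, if it were enabled.
# `amp` is an assumed flag; train.py derives it from the selected device.
import torch

amp = True  # assumption for illustration

if torch.__version__.startswith("1.8"):
    # PyTorch 1.8: AMP utilities live under torch.cuda.amp
    Autocast = torch.cuda.amp.autocast(enabled=amp)
    GradScaler = torch.cuda.amp.GradScaler
else:
    # Newer PyTorch: device-agnostic entry points under torch.amp
    Autocast = torch.amp.autocast("cuda", enabled=amp)
    GradScaler = torch.amp.GradScaler
```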
@@ -114,33 +114,33 @@ def train(hyp, opt, device, callbacks):
    model architecture, loss computation, and optimizer steps.

    Args:
-        hyp (str | dict): Path to the hyperparameters YAML file or a dictionary of hyperparameters.
-        opt (argparse.Namespace): Parsed command-line arguments containing training options.
-        device (torch.device): Device on which training occurs, e.g., 'cuda' or 'cpu'.
-        callbacks (Callbacks): Callback functions for various training events.
+        hyp (str | dict): Path to the hyperparameters YAML file or a dictionary of hyperparameters.
+        opt (argparse.Namespace): Parsed command-line arguments containing training options.
+        device (torch.device): Device on which training occurs, e.g., 'cuda' or 'cpu'.
+        callbacks (Callbacks): Callback functions for various training events.

    Returns:
-        None
-    #
-        Models and datasets download automatically from the latest YOLOv5 release.
+        None
+    #
+        Models and datasets download automatically from the latest YOLOv5 release.

    Example:
-        Single-GPU training:
-        ```bash
-        $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640  # from pretrained (recommended)
-        $ python train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640  # from scratch
-        ```
-
-        Multi-GPU DDP training:
-        ```bash
-        $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml --weights
-        yolov5s.pt --img 640 --device 0,1,2,3
-        ```
-
-        For more usage details, refer to:
-        - Models: https://github.com/ultralytics/yolov5/tree/master/models
-        - Datasets: https://github.com/ultralytics/yolov5/tree/master/data
-        - Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
+        Single-GPU training:
+        ```bash
+        $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640  # from pretrained (recommended)
+        $ python train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640  # from scratch
+        ```
+
+        Multi-GPU DDP training:
+        ```bash
+        $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml --weights
+        yolov5s.pt --img 640 --device 0,1,2,3
+        ```
+
+        For more usage details, refer to:
+        - Models: https://github.com/ultralytics/yolov5/tree/master/models
+        - Datasets: https://github.com/ultralytics/yolov5/tree/master/data
+        - Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
    """
    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = (
        Path(opt.save_dir),
@@ -360,7 +360,7 @@ def lf(x):
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
-    # scaler = GradScaler(enabled=amp)
+    # scaler = GradScaler(enabled=amp)
    scaler = None
    if torch.__version__.startswith("1.8"):
        scaler = torch.cuda.amp.GradScaler(enabled=amp)
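The version check above selects a scaler implementation; during training it is driven with the usual scale/step/update sequence. A minimal runnable sketch of that pattern, assuming a recent PyTorch 2.x where `torch.amp.GradScaler` accepts a device string; the tiny model, optimizer, and MSE loss are placeholders, not YOLOv5 code:

```python
# Sketch: driving a version-gated GradScaler in a training step.
# The model, optimizer, and loss are placeholders, not YOLOv5 code.
import torch
import torch.nn as nn

amp = torch.cuda.is_available()
device = "cuda" if amp else "cpu"
model = nn.Linear(10, 1).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

if torch.__version__.startswith("1.8"):
    scaler = torch.cuda.amp.GradScaler(enabled=amp)  # legacy 1.8 location
else:
    scaler = torch.amp.GradScaler("cuda", enabled=amp)  # assumes a build where this exists

x = torch.randn(4, 10, device=device)
y = torch.randn(4, 1, device=device)

optimizer.zero_grad()
loss = nn.functional.mse_loss(model(x), y)
scaler.scale(loss).backward()  # scale the loss to avoid fp16 gradient underflow
scaler.step(optimizer)  # unscales grads, skips the step if any are non-finite
scaler.update()  # adjust the scale factor for the next iteration
```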
@@ -422,7 +422,7 @@ def lf(x):
                imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)

            # Forward
-            #with Autocast:
+            # with Autocast:
            amp_autocast = None
            if torch.__version__.startswith("1.8"):
                amp_autocast = torch.cuda.amp.autocast(enabled=amp)
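Downstream of this hunk, the selected context manager wraps the forward pass so that matmuls and convolutions run in reduced precision where safe. A minimal sketch of that pattern, with a placeholder model and loss standing in for YOLOv5's model and compute_loss:

```python
# Sketch: wrapping the forward pass in the version-appropriate autocast context.
# `model`, `imgs`, and the summed loss are placeholders for train.py's objects.
import torch

amp = torch.cuda.is_available()
device = "cuda" if amp else "cpu"

if torch.__version__.startswith("1.8"):
    amp_autocast = torch.cuda.amp.autocast(enabled=amp)
else:
    amp_autocast = torch.amp.autocast("cuda", enabled=amp)

model = torch.nn.Linear(8, 2).to(device)
imgs = torch.randn(4, 8, device=device)

with amp_autocast:  # ops inside run in fp16 where safe, fp32 elsewhere
    pred = model(imgs)
    loss = pred.float().sum()  # stand-in for compute_loss(pred, targets)
```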