# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Train a YOLOv5 model on a custom dataset.

Models and datasets download automatically from the latest YOLOv5 release.
Models: https://github.com/ultralytics/yolov5/tree/master/models
Datasets: https://github.com/ultralytics/yolov5/tree/master/data
Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data

Usage:
    $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640  # from pretrained (RECOMMENDED)
    $ python path/to/train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640  # from scratch
"""

import argparse
import math
import os
import random
import sys
import time
from copy import deepcopy
from datetime import datetime
from pathlib import Path

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD, Adam, AdamW, lr_scheduler
from tqdm.auto import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

import val  # for end-of-epoch mAP
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.autobatch import check_train_batch_size
from utils.callbacks import Callbacks
from utils.datasets import create_dataloader
from utils.downloads import attempt_download
from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements,
                           check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds,
                           intersect_dicts, is_ascii, labels_to_class_weights, labels_to_image_weights, methods,
                           one_cycle, print_args, print_mutation, strip_optimizer)
from utils.loggers import Loggers
from utils.loggers.wandb.wandb_utils import check_wandb_resume
from utils.loss import ComputeLoss
from utils.metrics import fitness
from utils.plots import check_font, plot_evolve, plot_labels
from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first

LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))


def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictionary
    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
        opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze
    callbacks.run('on_pretrain_routine_start')

    # Directories
    w = save_dir / 'weights'  # weights dir
    (w.parent if evolve else w).mkdir(parents=True, exist_ok=True)  # make dir
    last, best = w / 'last.pt', w / 'best.pt'

    # Hyperparameters
    if isinstance(hyp, str):  # hyp may be a path to a hyperparameter yaml file
        with open(hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
    LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))

    # Save run settings (hyp and opt)
    if not evolve:
        with open(save_dir / 'hyp.yaml', 'w') as f:
            yaml.safe_dump(hyp, f, sort_keys=False)
        with open(save_dir / 'opt.yaml', 'w') as f:
            yaml.safe_dump(vars(opt), f, sort_keys=False)
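    # Note: opt.yaml saved above is what --resume reads back later (see main()), so an interrupted run restarts
    # with identical settings; hyp.yaml records the hyperparameters used. The hyp dict itself carries the
    # optimizer and augmentation settings (lr0, lrf, momentum, weight_decay, warmup_*, box/cls/obj gains,
    # hsv_*, degrees, translate, scale, shear, perspective, flipud, fliplr, mosaic, mixup, copy_paste --
    # the same keys listed in the evolution `meta` table in main() below).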
    # Loggers
    data_dict = None
    if RANK in [-1, 0]:
        loggers = Loggers(save_dir, weights, opt, hyp, LOGGER)  # loggers instance
        if loggers.wandb:
            data_dict = loggers.wandb.data_dict
            if resume:
                weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size

        # Register actions
        for k in methods(loggers):
            callbacks.register_action(k, callback=getattr(loggers, k))

    # Config
    plots = not evolve and not opt.noplots  # create plots
    cuda = device.type != 'cpu'
    init_seeds(1 + RANK)  # initialize random seeds
    with torch_distributed_zero_first(LOCAL_RANK):  # load dataset config
        data_dict = data_dict or check_dataset(data)  # check if None
    if not is_ascii(data_dict['names']):  # non-latin labels, i.e. asian, arabic, cyrillic
        check_font('Arial.Unicode.ttf', progress=True)
    train_path, val_path = data_dict['train'], data_dict['val']  # train and val set paths
    nc = 1 if single_cls else int(data_dict['nc'])  # number of classes (1 if opt.single_cls is set)
    names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}'  # check
    is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt')  # COCO dataset

    # Model
    check_suffix(weights, '.pt')  # check weights
    pretrained = weights.endswith('.pt')  # a .pt weights file means we start from a pretrained checkpoint
    if pretrained:
        # Load pretrained checkpoint
        with torch_distributed_zero_first(LOCAL_RANK):
            weights = attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location='cpu')  # load checkpoint to CPU to avoid CUDA memory leak
        '''
        The model can be created either from opt.cfg or from ckpt['model'].yaml.
        The difference is whether this is a resume: on resume, opt.cfg is set to '' and the model is built
        from ckpt['model'].yaml. This also decides whether the 'anchor' keys are excluded below: checkpoints
        store anchors, so if a user defined custom anchors and then resumed, the COCO-based anchors saved in
        the checkpoint would otherwise overwrite them. intersect_dicts() therefore drops the keys in exclude.
        '''
        model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
        exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else []  # exclude keys
        csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
        csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # intersect checkpoint and model keys
        model.load_state_dict(csd, strict=False)  # load pretrained weights
        # Report how many checkpoint items were transferred into the newly created model
        # (with a pretrained start and exclude set, the anchors/anchor_grid entries are skipped)
        LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}')  # report
    else:
        # Create a new model and train from scratch; ch is the number of input image channels
        model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
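    # For reference, intersect_dicts (utils.general) keeps only the checkpoint entries that also exist in the
    # new model with an identical tensor shape and whose key does not contain an excluded substring.
    # A minimal sketch of the idea, not the exact implementation:
    #
    #   def intersect_dicts(da, db, exclude=()):
    #       return {k: v for k, v in da.items()
    #               if k in db and v.shape == db[k].shape and not any(x in k for x in exclude)}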
    # Freeze
    '''
    Layers are frozen simply by listing their name prefixes below.
    The authors do not recommend freezing layers: in their experiments it did not give better results.
    So that the optimizer parameter grouping below still works, requires_grad is first set to True for all
    parameters, then disabled for the frozen layers.
    '''
    freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))]  # layers to freeze
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):  # if this layer is in the freeze list, disable its gradient
            LOGGER.info(f'freezing {k}')
            v.requires_grad = False

    # Image size
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2)  # verify imgsz is a multiple of the max stride gs

    # Batch size
    if RANK == -1 and batch_size == -1:  # single-GPU only, estimate best batch size
        batch_size = check_train_batch_size(model, imgsz)
        loggers.on_params_update({"batch_size": batch_size})

    # Optimizer
    # nbs is the nominal batch size: with the default opt.batch_size of 16 and nbs of 64, gradients are
    # accumulated for 64 / 16 = 4 batches before each optimizer step, effectively enlarging the batch size.
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= batch_size * accumulate / nbs  # scale weight_decay to the effective batch size
    LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}")

    g = [], [], []  # optimizer parameter groups
    bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # normalization layers, i.e. BatchNorm2d()
    # Split the parameters into three groups for the optimizer:
    # conv/linear weights (with decay), normalization weights (no decay) and biases (no decay)
    for v in model.modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):  # bias
            g[2].append(v.bias)
        if isinstance(v, bn):  # weight (no decay), i.e. BatchNorm weights
            g[1].append(v.weight)
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):  # weight (with decay)
            g[0].append(v.weight)

    # Select the optimizer; g[2] (biases) forms the base parameter group
    if opt.optimizer == 'Adam':
        optimizer = Adam(g[2], lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    elif opt.optimizer == 'AdamW':
        optimizer = AdamW(g[2], lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = SGD(g[2], lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': g[0], 'weight_decay': hyp['weight_decay']})  # add g0 with weight_decay
    optimizer.add_param_group({'params': g[1]})  # add g1 (BatchNorm2d weights)
    LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups "
                f"{len(g[1])} weight (no decay), {len(g[0])} weight, {len(g[2])} bias")
    del g

    # Scheduler (learning rate decay)
    if opt.cos_lr:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine annealing 1 -> hyp['lrf']
    else:
        lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)

    # EMA (exponential moving average of the model weights)
    ema = ModelEMA(model) if RANK in [-1, 0] else None
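    # For reference, ModelEMA (utils.torch_utils) keeps a shadow copy of the weights that is updated after
    # every optimizer step, roughly as
    #   ema_param = d * ema_param + (1 - d) * model_param
    # where the decay d ramps up towards ~0.9999 over the first few thousand updates, so early training is
    # tracked closely and later training is smoothed. Validation and the saved 'ema' checkpoint entry use these
    # averaged weights (a paraphrase of the implementation, not its exact code).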
    # Resume
    # Initialize the start epoch and best fitness.
    # best_fitness is the weighted sum [0.0, 0.0, 0.1, 0.9] · [precision, recall, mAP@0.5, mAP@0.5:0.95];
    # best.pt is saved whenever best_fitness improves.
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # EMA
        if ema and ckpt.get('ema'):
            ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
            ema.updates = ckpt['updates']

        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if resume:
            assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.'
        # If the requested epochs is smaller than the number already trained, treat it as the number of
        # additional fine-tuning epochs rather than the total number of epochs.
        if epochs < start_epoch:
            LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.")
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt, csd

    # DP mode
    # DataParallel (single machine, multiple GPUs). Generally avoided because DDP performs better.
    # RANK is the process index: RANK == -1 with more than one visible GPU enables DataParallel,
    # RANK == -1 with a single GPU runs without any parallelism.
    if cuda and RANK == -1 and torch.cuda.device_count() > 1:
        LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
                       'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm (cross-GPU synchronized BatchNorm)
    if opt.sync_bn and cuda and RANK != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        LOGGER.info('Using SyncBatchNorm()')

    print('----------------------------------------------')
    opt.cache = 'val'

    # Trainloader
    train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls,
                                              hyp=hyp, augment=True, cache=None if opt.cache == 'val' else opt.cache,
                                              rect=opt.rect, rank=LOCAL_RANK, workers=workers,
                                              image_weights=opt.image_weights, quad=opt.quad,
                                              prefix=colorstr('train: '), shuffle=True)
    # Get the largest class id in the labels and make sure it is smaller than the number of classes
    mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max())  # max label class
    nb = len(train_loader)  # number of batches
    assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'
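    # For reference, each entry of dataset.labels is an (n, 5) array in YOLO format:
    # [class_id, x_center, y_center, width, height], with box coordinates normalized to 0-1,
    # which is why column 0 above yields the class ids and must stay below nc.
    #   e.g. a class-0 box centred in the image:  0 0.5 0.5 0.25 0.6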
    # Process 0 (validation dataloader, main process only)
    if RANK in [-1, 0]:
        val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls,
                                       hyp=hyp, cache=None if noval else opt.cache, rect=True, rank=-1,
                                       workers=workers * 2, pad=0.5, prefix=colorstr('val: '))[0]

        if not resume:
            labels = np.concatenate(dataset.labels, 0)
            # c = torch.tensor(labels[:, 0])  # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
            # model._initialize_biases(cf.to(device))
            if plots:
                plot_labels(labels, names, save_dir)

            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
            model.half().float()  # pre-reduce anchor precision

        callbacks.run('on_pretrain_routine_end')

    # DDP mode
    if cuda and RANK != -1:
        model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)

    # Model attributes
    nl = de_parallel(model).model[-1].nl  # number of detection layers (to scale hyps)
    hyp['box'] *= 3 / nl  # scale box loss gain to the number of detection layers
    hyp['cls'] *= nc / 80 * 3 / nl  # scale cls loss gain to the number of classes and layers
    hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl  # scale obj loss gain to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    # (class weights are computed from the training labels and are inversely proportional to class frequency)
    model.names = names  # class names

    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 100)  # number of warmup iterations, max(3 epochs, 100 iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    last_opt_step = -1
    # Initialize per-class mAPs and results
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    # Set the epoch the scheduler has already reached, so that a --resume run continues the LR schedule
    # where the interrupted run left off.
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)  # mixed-precision training: create a GradScaler before training starts
    stopper = EarlyStopping(patience=opt.patience)
    compute_loss = ComputeLoss(model)  # init loss class
    callbacks.run('on_train_start')
    # Log the train/val image sizes, the number of dataloader workers and where results are saved
    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
                f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
                f"Logging results to {colorstr('bold', save_dir)}\n"
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        callbacks.run('on_train_epoch_start')
        model.train()

        # Update image weights (optional, single-GPU only)
        if opt.image_weights:
            # Class-balancing strategy: when image-weighted sampling is enabled, combine the class weights
            # initialized above (model.class_weights) with the per-class mAPs and the classes present in each
            # image, then draw image indices with random.choices so under-performing classes are sampled more.
            cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
            iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
            dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx

        # Update mosaic border (optional)
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(3, device=device)  # mean losses, printed during training
        if RANK != -1:
            # In DDP mode the sampler shuffles with epoch + seed as the random seed,
            # so the shuffle differs every epoch.
            train_loader.sampler.set_epoch(epoch)
        pbar = enumerate(train_loader)
        LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
        if RANK in (-1, 0):
            pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
        optimizer.zero_grad()
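        # The warmup block at the start of the batch loop below linearly interpolates the learning rate and
        # momentum over the first nw iterations with np.interp. A minimal sketch of the idea (illustrative
        # values only; the real targets are x['initial_lr'] * lf(epoch), not a fixed lr0):
        #
        #   import numpy as np
        #   nw, ni = 1000, 250                             # warmup iterations, current iteration
        #   lr = np.interp(ni, [0, nw], [0.0, 0.01])       # non-bias params: 0.0 -> lr
        #   bias_lr = np.interp(ni, [0, nw], [0.1, 0.01])  # bias params: warmup_bias_lr -> lr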
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            callbacks.run('on_train_batch_start')
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            # During the first nw iterations, accumulate and the per-group learning rates are interpolated
            if ni <= nw:
                xi = [0, nw]  # x interp
                # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())  # batches to accumulate before stepping
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from warmup_bias_lr (0.1) to lr0 * lf(epoch); all other lrs rise from 0.0
                    # to lr0 * lf(epoch), where lf is the cosine/linear decay function defined above
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        # momentum also ramps from warmup_momentum up to hyp['momentum']
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale training
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # random size in imgsz*0.5 - imgsz*1.5 (gs-multiple)
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward (mixed precision)
            with amp.autocast(enabled=cuda):  # enable the autocast context manager
                pred = model(imgs)  # forward
                # Compute the loss: loss is the total value, loss_items holds the box regression,
                # objectness and classification components
                loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                if RANK != -1:
                    loss *= WORLD_SIZE  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.

            # Backward
            scaler.scale(loss).backward()  # scale the loss before backprop (gradient scaling)

            # Optimize
            if ni - last_opt_step >= accumulate:  # only step after `accumulate` backward passes
                # scaler.step() first unscales the gradients; if they contain no infs/NaNs, optimizer.step()
                # is called to update the weights, otherwise the step is skipped so the weights stay unchanged
                scaler.step(optimizer)  # optimizer.step
                scaler.update()  # decide whether to grow or shrink the scale factor
                optimizer.zero_grad()
                if ema:
                    ema.update(model)
                last_opt_step = ni

            # Log
            if RANK in (-1, 0):
                # GPU memory, epoch, losses, number of targets and image size
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                pbar.set_description(('%10s' * 2 + '%10.4g' * 5) %
                                     (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
                callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots)
                if callbacks.stop_training:
                    return
            # end batch ------------------------------------------------------------------------------------------------

        # Scheduler (learning rate decay, stepped once per epoch)
        lr = [x['lr'] for x in optimizer.param_groups]  # for loggers
        scheduler.step()

        if RANK in (-1, 0):
            # mAP
            callbacks.run('on_train_epoch_end', epoch=epoch)
            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])  # update EMA attributes
            final_epoch = (epoch + 1 == epochs) or stopper.possible_stop  # is this the last epoch?
            # Evaluate on the validation set (using the EMA model) and compute mAP
            if not noval or final_epoch:  # Calculate mAP
                results, maps, _ = val.run(data_dict,
                                           batch_size=batch_size // WORLD_SIZE * 2,
                                           imgsz=imgsz,
                                           model=ema.ema,
                                           single_cls=single_cls,
                                           dataloader=val_loader,
                                           save_dir=save_dir,
                                           plots=False,
                                           callbacks=callbacks,
                                           compute_loss=compute_loss)

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
            if fi > best_fitness:
                best_fitness = fi
            log_vals = list(mloss) + list(results) + lr
            callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)
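            # For reference, fitness() (utils.metrics) reduces [P, R, mAP@.5, mAP@.5:.95] to one scalar with a
            # weighted sum; the stock weights are [0.0, 0.0, 0.1, 0.9], i.e. roughly
            #   fi = 0.1 * mAP@0.5 + 0.9 * mAP@0.5:0.95
            # so best.pt effectively tracks mAP@0.5:0.95.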
            # Save model
            '''
            Save a checkpoint usable for inference or for resuming training: besides the model itself it stores
            the epoch, best_fitness, EMA state and optimizer state. The saved model is the EMA model; the
            optimizer state is removed by strip_optimizer() once training finishes.
            '''
            if (not nosave) or (final_epoch and not evolve):  # if save
                ckpt = {
                    'epoch': epoch,
                    'best_fitness': best_fitness,
                    'model': deepcopy(de_parallel(model)).half(),
                    'ema': deepcopy(ema.ema).half(),
                    'updates': ema.updates,
                    'optimizer': optimizer.state_dict(),
                    'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None,
                    'date': datetime.now().isoformat()}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                if (epoch > 0) and (opt.save_period > 0) and (epoch % opt.save_period == 0):
                    torch.save(ckpt, w / f'epoch{epoch}.pt')
                del ckpt
                callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)

            # Stop Single-GPU
            if RANK == -1 and stopper(epoch=epoch, fitness=fi):
                break

            # Stop DDP TODO: known issues https://github.com/ultralytics/yolov5/pull/4576
            # stop = stopper(epoch=epoch, fitness=fi)
            # if RANK == 0:
            #     dist.broadcast_object_list([stop], 0)  # broadcast 'stop' to all ranks

        # Stop DDP
        # with torch_distributed_zero_first(RANK):
        #     if stop:
        #         break  # must break all DDP ranks

        # end epoch ----------------------------------------------------------------------------------------------------
    # end training -----------------------------------------------------------------------------------------------------
    if RANK in (-1, 0):
        LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
        for f in last, best:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
                if f is best:
                    # Validate the best model
                    LOGGER.info(f'\nValidating {f}...')
                    results, _, _ = val.run(
                        data_dict,
                        batch_size=batch_size // WORLD_SIZE * 2,
                        imgsz=imgsz,
                        model=attempt_load(f, device).half(),
                        iou_thres=0.65 if is_coco else 0.60,  # best pycocotools results at 0.65
                        single_cls=single_cls,
                        dataloader=val_loader,
                        save_dir=save_dir,
                        save_json=is_coco,
                        verbose=True,
                        plots=plots,
                        callbacks=callbacks,
                        compute_loss=compute_loss)  # val best model with plots
                    if is_coco:
                        callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)

        callbacks.run('on_train_end', last, best, plots, epoch, results)
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")

    torch.cuda.empty_cache()  # release GPU memory
    return results
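# The checkpoints written each epoch by train() above are plain dicts with the keys built in the `ckpt = {...}`
# block (epoch, best_fitness, model, ema, updates, optimizer, wandb_id, date). A minimal sketch of inspecting
# one offline (the path is illustrative; note that after training ends, strip_optimizer() has already dropped
# the optimizer state and folded the EMA weights into 'model' to shrink last.pt/best.pt):
#
#   import torch
#   ckpt = torch.load('runs/train/exp/weights/last.pt', map_location='cpu')
#   print(ckpt['epoch'], ckpt['date'])
#   model = ckpt['model'].float()  # FP16 -> FP32 for inspection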
def parse_opt(known=False):
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path')  # pretrained weights
    parser.add_argument('--cfg', type=str, default='', help='model.yaml path')  # model config / network structure
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')  # dataset paths, class names, etc.
    parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
    parser.add_argument('--epochs', type=int, default=300)  # total training epochs
    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--noval', action='store_true', help='only validate final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
    parser.add_argument('--noplots', action='store_true', help='save no plot files')
    parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')  # Google Cloud Storage bucket
    parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')  # pre-cache images
    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
    parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--quad', action='store_true', help='quad dataloader')
    parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
    parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
    parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
    parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')  # for transfer learning
    parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')

    # Weights & Biases arguments
    parser.add_argument('--entity', default=None, help='W&B: Entity')
    parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option')
    parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval')
    parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use')

    opt = parser.parse_known_args()[0] if known else parser.parse_args()
    return opt
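# A typical single-GPU invocation (mirrors the Usage examples in the module docstring; the dataset, weights and
# epoch values are illustrative):
#   $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640 --batch-size 16 --epochs 100 --device 0
# Adding `--evolve 300` instead runs the hyperparameter-evolution branch of main() below.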
def main(opt, callbacks=Callbacks()):
    # Checks
    if RANK in (-1, 0):  # RANK is the process index used for inter-process communication; rank 0 is the master node
        print_args(vars(opt))  # print the argument dict
        check_git_status()  # check whether the code is up to date
        check_requirements(exclude=['thop'])  # check python package requirements

    # Resume
    if opt.resume and not check_wandb_resume(opt) and not opt.evolve:  # resume an interrupted training run
        # If opt.resume is a string it is the checkpoint path; otherwise get_latest_run() returns the most
        # recent last.pt under runs/
        ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()
        assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
        with open(Path(ckpt).parent.parent / 'opt.yaml', errors='ignore') as f:
            # When resuming, replace the current opt with the one saved alongside the checkpoint
            opt = argparse.Namespace(**yaml.safe_load(f))  # replace
        opt.cfg, opt.weights, opt.resume = '', ckpt, True  # reinstate
        LOGGER.info(f'Resuming training from {ckpt}')
    else:
        # Train from the given configuration
        opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
            check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project)  # checks
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        if opt.evolve:  # hyperparameter evolution: redirect the project directory
            if opt.project == str(ROOT / 'runs/train'):  # if default project name, rename to runs/evolve
                opt.project = str(ROOT / 'runs/evolve')
            opt.exist_ok, opt.resume = opt.resume, False  # pass resume to exist_ok and disable resume
        if opt.name == 'cfg':
            opt.name = Path(opt.cfg).stem  # use model.yaml as name
        opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # incremental run dirs exp1, exp2, ...

    # DDP mode
    device = select_device(opt.device, batch_size=opt.batch_size)  # select device
    if LOCAL_RANK != -1:  # LOCAL_RANK != -1 means this process was launched by a DDP launcher (one process per GPU)
        # LOCAL_RANK is the GPU index within a node and is set by the launcher rather than passed explicitly;
        # e.g. rank = 3, local_rank = 0 means the first GPU of the fourth process
        msg = 'is not compatible with YOLOv5 Multi-GPU DDP training'
        assert not opt.image_weights, f'--image-weights {msg}'
        assert not opt.evolve, f'--evolve {msg}'
        assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
        torch.cuda.set_device(LOCAL_RANK)
        device = torch.device('cuda', LOCAL_RANK)  # select the device for this process by GPU index
        dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")
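    # For reference, DDP runs are normally launched with torch.distributed.run, one process per GPU
    # (a sketch; GPU ids and nproc_per_node are illustrative):
    #   $ python -m torch.distributed.run --nproc_per_node 2 train.py \
    #         --data coco128.yaml --weights yolov5s.pt --img 640 --batch-size 64 --device 0,1
    # The launcher sets LOCAL_RANK / RANK / WORLD_SIZE, which is how the branch above detects DDP mode.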
    # Train (no hyperparameter evolution: call train() directly)
    if not opt.evolve:
        train(opt.hyp, opt, device, callbacks)
        if WORLD_SIZE > 1 and RANK == 0:
            LOGGER.info('Destroying process group... ')
            dist.destroy_process_group()

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {
            'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
            'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
            'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
            'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
            'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
            'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
            'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
            'box': (1, 0.02, 0.2),  # box loss gain
            'cls': (1, 0.2, 4.0),  # cls loss gain
            'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
            'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
            'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
            'iou_t': (0, 0.1, 0.7),  # IoU training threshold
            'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
            'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
            'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)

        with open(opt.hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
                hyp['anchors'] = 3
        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
        if opt.bucket:
            os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}')  # download evolve.csv if exists

        for _ in range(opt.evolve):  # generations to evolve
            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)  # load previous evolution results
                n = min(5, len(x))  # number of previous results to consider (at most the last 5)
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum > 0) derived from each result's fitness
                # Build the base hyp according to the selected parent method
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination
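                # For reference, each evolve.csv row stores the 7 result metrics first
                # (P, R, mAP@.5, mAP@.5:.95 and the val box/obj/cls losses) followed by the hyperparameter
                # values, which is why the mutation below reads hyps starting at column offset 7 (x[i + 7]).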
                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([meta[k][0] for k in hyp.keys()])  # gains 0-1 (mutation scale per hyp)
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    hyp[k] = float(x[i + 7] * v[i])  # mutate

            # Constrain hyps to the limits defined in meta
            for k, v in meta.items():
                hyp[k] = max(hyp[k], v[1])  # lower limit
                hyp[k] = min(hyp[k], v[2])  # upper limit
                hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device, callbacks)
            callbacks = Callbacks()

            # Write mutation results (results and the corresponding hyps) to evolve.csv
            print_mutation(results, hyp.copy(), save_dir, opt.bucket)

        # Plot results
        plot_evolve(evolve_csv)
        LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n'
                    f"Results saved to {colorstr('bold', save_dir)}\n"
                    f'Usage example: $ python train.py --hyp {evolve_yaml}')


def run(**kwargs):
    # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
    opt = parse_opt(True)
    for k, v in kwargs.items():
        setattr(opt, k, v)
    main(opt)
    return opt


if __name__ == "__main__":
    opt = parse_opt()
    main(opt)

# CUDA_VISIBLE_DEVICES="1,2" python train.py --data ../../data/helmet_fall_phone_delete_work/helmet_fall_phone.yaml --weights weights/yolov5l6.pt --img 1280 --hyp data/hyps/hyp.scratch-high.yaml --multi-scale --epochs 50 --name helmet_fall_phone_delete_work_2 --batch-size 8