# TrainingPlatform_Flask/yolov5/train.py
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Train a YOLOv5 model on a custom dataset. Models and datasets download automatically from the latest YOLOv5 release.
Usage - Single-GPU training:
$ python train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (recommended)
$ python train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch
Usage - Multi-GPU DDP training:
$ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml --weights yolov5s.pt --img 640 --device 0,1,2,3
Models: https://github.com/ultralytics/yolov5/tree/master/models
Datasets: https://github.com/ultralytics/yolov5/tree/master/data
Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
"""
import argparse
import math
import os
import random
import subprocess
import sys
import time
from copy import deepcopy
from datetime import datetime, timedelta
from pathlib import Path
import ray
try:
import comet_ml # must be imported before torch (if installed)
except ImportError:
comet_ml = None
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import yaml
from torch.optim import lr_scheduler
from tqdm import tqdm
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
import val as validate # for end-of-epoch mAP
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.autobatch import check_train_batch_size
from utils.callbacks import Callbacks
from utils.dataloaders import create_dataloader
from utils.downloads import attempt_download, is_url
from utils.general import (
LOGGER,
TQDM_BAR_FORMAT,
check_amp,
check_dataset,
check_file,
check_git_info,
check_git_status,
check_img_size,
check_requirements,
check_suffix,
check_yaml,
colorstr,
get_latest_run,
increment_path,
init_seeds,
intersect_dicts,
labels_to_class_weights,
labels_to_image_weights,
methods,
one_cycle,
print_args,
print_mutation,
strip_optimizer,
yaml_save,
)
from utils.loggers import LOGGERS, Loggers
from utils.loggers.comet.comet_utils import check_comet_resume
from utils.loss import ComputeLoss
from utils.metrics import fitness
from utils.plots import plot_evolve
from utils.torch_utils import (
EarlyStopping,
ModelEMA,
de_parallel,
select_device,
smart_DDP,
smart_optimizer,
smart_resume,
torch_distributed_zero_first,
)
from dataset_action import update_database_status, insert_training_epoch_status
LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1)) # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv("RANK", -1))
WORLD_SIZE = int(os.getenv("WORLD_SIZE", 1))
GIT_INFO = check_git_info()
def train(training_id, hyp, opt, device, callbacks):
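    """
    Train a YOLOv5 model and report per-epoch metrics back to the training platform.

    Args:
        training_id: database key passed to insert_training_epoch_status after each validation.
        hyp: hyperparameter dict, or path to a hyperparameter YAML file.
        opt: argparse.Namespace of training options (see parse_opt).
        device: torch device returned by select_device.
        callbacks: Callbacks instance used to run training-lifecycle hooks.
    """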
save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = (
Path(opt.save_dir),
opt.epochs,
opt.batch_size,
opt.weights,
opt.single_cls,
opt.evolve,
opt.data,
opt.cfg,
opt.resume,
opt.noval,
opt.nosave,
opt.workers,
opt.freeze,
)
callbacks.run("on_pretrain_routine_start")
    # Directories: set up the save paths for training logs
    # weights are saved under e.g. runs/train/exp1/weights
    w = save_dir / "weights"  # weights dir
    # create the weights / evolve directory
    (w.parent if evolve else w).mkdir(parents=True, exist_ok=True)  # make dir
    # checkpoint paths for training results: last.pt and best.pt
    last, best = w / "last.pt", w / "best.pt"
    # Hyperparameters: load the hyperparameter dict
if isinstance(hyp, str):
with open(hyp, errors="ignore") as f:
hyp = yaml.safe_load(f) # load hyps dict
LOGGER.info(colorstr("hyperparameters: ") + ", ".join(f"{k}={v}" for k, v in hyp.items()))
opt.hyp = hyp.copy() # for saving hyps to checkpoints
# Save run settings
if not evolve:
yaml_save(save_dir / "hyp.yaml", hyp)
yaml_save(save_dir / "opt.yaml", vars(opt))
# Loggers
data_dict = None
if RANK in {-1, 0}:
include_loggers = list(LOGGERS)
if getattr(opt, "ndjson_console", False):
include_loggers.append("ndjson_console")
if getattr(opt, "ndjson_file", False):
include_loggers.append("ndjson_file")
loggers = Loggers(
save_dir=save_dir,
weights=weights,
opt=opt,
hyp=hyp,
logger=LOGGER,
include=tuple(include_loggers),
)
# Register actions
for k in methods(loggers):
callbacks.register_action(k, callback=getattr(loggers, k))
# Process custom dataset artifact link
data_dict = loggers.remote_dataset
if resume: # If resuming runs from remote artifact
weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size
# Config
plots = not evolve and not opt.noplots # create plots
cuda = device.type != "cpu"
init_seeds(opt.seed + 1 + RANK, deterministic=True)
with torch_distributed_zero_first(LOCAL_RANK):
data_dict = data_dict or check_dataset(data) # check if None
train_path, val_path = data_dict["train"], data_dict["val"]
nc = 1 if single_cls else int(data_dict["nc"]) # number of classes
names = {0: "item"} if single_cls and len(data_dict["names"]) != 1 else data_dict["names"] # class names
is_coco = isinstance(val_path, str) and val_path.endswith("coco/val2017.txt") # COCO dataset
# Model
check_suffix(weights, ".pt") # check weights
pretrained = weights.endswith(".pt")
if pretrained:
with torch_distributed_zero_first(LOCAL_RANK):
weights = attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location="cpu") # load checkpoint to CPU to avoid CUDA memory leak
model = Model(cfg or ckpt["model"].yaml, ch=3, nc=nc, anchors=hyp.get("anchors")).to(device) # create
exclude = ["anchor"] if (cfg or hyp.get("anchors")) and not resume else [] # exclude keys
csd = ckpt["model"].float().state_dict() # checkpoint state_dict as FP32
csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect
model.load_state_dict(csd, strict=False) # load
LOGGER.info(f"Transferred {len(csd)}/{len(model.state_dict())} items from {weights}") # report
else:
model = Model(cfg, ch=3, nc=nc, anchors=hyp.get("anchors")).to(device) # create
amp = check_amp(model) # check AMP
# Freeze
freeze = [f"model.{x}." for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze
for k, v in model.named_parameters():
v.requires_grad = True # train all layers
# v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results)
if any(x in k for x in freeze):
LOGGER.info(f"freezing {k}")
v.requires_grad = False
# Image size
gs = max(int(model.stride.max()), 32) # grid size (max stride)
imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple
# Batch size
if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size
batch_size = check_train_batch_size(model, imgsz, amp)
loggers.on_params_update({"batch_size": batch_size})
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing
hyp["weight_decay"] *= batch_size * accumulate / nbs # scale weight_decay
optimizer = smart_optimizer(model, opt.optimizer, hyp["lr0"], hyp["momentum"], hyp["weight_decay"])
# Scheduler
if opt.cos_lr:
lf = one_cycle(1, hyp["lrf"], epochs) # cosine 1->hyp['lrf']
else:
def lf(x):
"""Linear learning rate scheduler function with decay calculated by epoch proportion."""
return (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs)
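    # LambdaLR multiplies each group's initial lr by lf(epoch), i.e. lr(epoch) = lr0 * lf(epoch)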
# EMA
ema = ModelEMA(model) if RANK in {-1, 0} else None
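    # the EMA copy (ema.ema) is what gets validated each epoch and saved in checkpoints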
# Resume
best_fitness, start_epoch = 0.0, 0
if pretrained:
if resume:
best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
del ckpt, csd
# DP mode
if cuda and RANK == -1 and torch.cuda.device_count() > 1:
LOGGER.warning(
"WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n"
"See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started."
)
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and RANK != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
LOGGER.info("Using SyncBatchNorm()")
# Trainloader
train_loader, dataset = create_dataloader(
train_path,
imgsz,
batch_size // WORLD_SIZE,
gs,
single_cls,
hyp=hyp,
augment=True,
cache=None if opt.cache == "val" else opt.cache,
rect=opt.rect,
rank=LOCAL_RANK,
workers=workers,
image_weights=opt.image_weights,
quad=opt.quad,
prefix=colorstr("train: "),
shuffle=True,
seed=opt.seed,
)
labels = np.concatenate(dataset.labels, 0)
mlc = int(labels[:, 0].max()) # max label class
assert mlc < nc, f"Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}"
# Process 0
if RANK in {-1, 0}:
val_loader = create_dataloader(
val_path,
imgsz,
batch_size // WORLD_SIZE * 2,
gs,
single_cls,
hyp=hyp,
cache=None if noval else opt.cache,
rect=True,
rank=-1,
workers=workers * 2,
pad=0.5,
prefix=colorstr("val: "),
)[0]
if not resume:
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz) # run AutoAnchor
model.half().float() # pre-reduce anchor precision
callbacks.run("on_pretrain_routine_end", labels, names)
# DDP mode
if cuda and RANK != -1:
model = smart_DDP(model)
# Model attributes
nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps)
hyp["box"] *= 3 / nl # scale to layers
hyp["cls"] *= nc / 80 * 3 / nl # scale to classes and layers
hyp["obj"] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers
hyp["label_smoothing"] = opt.label_smoothing
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
model.names = names
"""
4.2训练热身部分
"""
# Start training
t0 = time.time()
nb = len(train_loader) # number of batches
nw = max(round(hyp["warmup_epochs"] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations)
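    # e.g. warmup_epochs=3 with nb=500 batches per epoch -> nw=1500 warmup iterations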
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
last_opt_step = -1
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = torch.cuda.amp.GradScaler(enabled=amp)
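    # GradScaler scales the loss up before backward to avoid FP16 gradient underflow under AMP,
    # then unscales gradients before the optimizer step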
stopper, stop = EarlyStopping(patience=opt.patience), False
compute_loss = ComputeLoss(model) # init loss class
callbacks.run("on_train_start")
LOGGER.info(
f"热身训练结束,开始正式训练\n"
f"Image sizes {imgsz} train, {imgsz} val\n"
f"Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n"
f"Logging results to {colorstr('bold', save_dir)}\n"
f"Starting training for {epochs} epochs..."
)
"""
4.3开始训练
"""
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
callbacks.run("on_train_epoch_start")
"""
告诉模型现在是训练阶段 因为BN层DropOut层两阶段目标检测模型等
训练阶段和预测阶段进行的运算不同所以要将二者分开来
"""
model.train()
# Update image weights (optional, single-GPU only)
if opt.image_weights:
cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
# Update mosaic border (optional)
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(3, device=device) # mean losses
if RANK != -1:
train_loader.sampler.set_epoch(epoch)
pbar = enumerate(train_loader)
LOGGER.info(("\n" + "%11s" * 7) % ("Epoch", "GPU_mem", "box_loss", "obj_loss", "cls_loss", "Instances", "Size"))
if RANK in {-1, 0}:
pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar
optimizer.zero_grad()
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
callbacks.run("on_train_batch_start")
ni = i + nb * epoch # number integrated batches (since train start)
imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x["lr"] = np.interp(ni, xi, [hyp["warmup_bias_lr"] if j == 0 else 0.0, x["initial_lr"] * lf(epoch)])
if "momentum" in x:
x["momentum"] = np.interp(ni, xi, [hyp["warmup_momentum"], hyp["momentum"]])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)
            # Forward pass
            with torch.amp.autocast("cuda", enabled=amp):
                # feed the images through the network to get predictions
                pred = model(imgs)  # forward
                # compute the loss: classification, objectness, and box regression components
                loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
if RANK != -1:
loss *= WORLD_SIZE # gradient averaged between devices in DDP mode
if opt.quad:
loss *= 4.0
# Backward
scaler.scale(loss).backward()
# Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
if ni - last_opt_step >= accumulate:
scaler.unscale_(optimizer) # unscale gradients
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
last_opt_step = ni
# Log
if RANK in {-1, 0}:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = f"{torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0:.3g}G" # (GB)
pbar.set_description(
("%11s" * 2 + "%11.4g" * 5)
% (f"{epoch}/{epochs - 1}", mem, *mloss, targets.shape[0], imgs.shape[-1])
)
callbacks.run("on_train_batch_end", model, ni, imgs, targets, paths, list(mloss))
if callbacks.stop_training:
return
# end batch ------------------------------------------------------------------------------------------------
# Scheduler
lr = [x["lr"] for x in optimizer.param_groups] # for loggers
scheduler.step()
"""
4.4 训练完成保存模型
"""
if RANK in {-1, 0}:
# mAP
callbacks.run("on_train_epoch_end", epoch=epoch)
ema.update_attr(model, include=["yaml", "nc", "hyp", "names", "stride", "class_weights"])
            # check whether the current epoch is the last one
            final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
            # noval: if True, validate only on the final epoch; if False, validate every epoch
            if not noval or final_epoch:  # Calculate mAP
results, maps, _ = validate.run(
data_dict,
batch_size=batch_size // WORLD_SIZE * 2,
imgsz=imgsz,
half=amp,
model=ema.ema,
                    single_cls=single_cls,  # whether this is a single-class dataset
                    dataloader=val_loader,
                    save_dir=save_dir,  # save location, e.g. runs/train/expn
                    plots=False,  # whether to plot results
                    callbacks=callbacks,
                    compute_loss=compute_loss,  # loss function
)
precision = round(float(results[0]), 3)
recall = round(float(results[1]), 3)
map50 = round(float(results[2]), 3)
map95 = round(float(results[3]), 3)
insert_training_epoch_status(training_id, epoch, map50, map95, precision, recall)
# Update best mAP
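                # fitness() weights [P, R, mAP@0.5, mAP@0.5:0.95] as [0.0, 0.0, 0.1, 0.9]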
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
stop = stopper(epoch=epoch, fitness=fi) # early stop check
if fi > best_fitness:
best_fitness = fi
log_vals = list(mloss) + list(results) + lr
callbacks.run("on_fit_epoch_end", log_vals, epoch, best_fitness, fi)
# Save model
if (not nosave) or (final_epoch and not evolve): # if save
ckpt = {
"epoch": epoch,
"best_fitness": best_fitness,
"model": deepcopy(de_parallel(model)).half(),
"ema": deepcopy(ema.ema).half(),
"updates": ema.updates,
"optimizer": optimizer.state_dict(),
"opt": vars(opt),
"git": GIT_INFO, # {remote, branch, commit} if a git repo
"date": datetime.now().isoformat(),
}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
if opt.save_period > 0 and epoch % opt.save_period == 0:
torch.save(ckpt, w / f"epoch{epoch}.pt")
del ckpt
callbacks.run("on_model_save", last, epoch, final_epoch, best_fitness, fi)
# EarlyStopping
if RANK != -1: # if DDP training
broadcast_list = [stop if RANK == 0 else None]
dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks
if RANK != 0:
stop = broadcast_list[0]
if stop:
break # must break all DDP ranks
# end epoch ----------------------------------------------------------------------------------------------------
# end training -----------------------------------------------------------------------------------------------------
if RANK in {-1, 0}:
LOGGER.info(f"\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.")
for f in last, best:
if f.exists():
strip_optimizer(f) # strip optimizers
if f is best:
LOGGER.info(f"\nValidating {f}...")
results, _, _ = validate.run(
data_dict,
batch_size=batch_size // WORLD_SIZE * 2,
imgsz=imgsz,
model=attempt_load(f, device).half(),
iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65
single_cls=single_cls,
dataloader=val_loader,
save_dir=save_dir,
save_json=is_coco,
verbose=True,
plots=plots,
callbacks=callbacks,
compute_loss=compute_loss,
) # val best model with plots
if is_coco:
callbacks.run("on_fit_epoch_end", list(mloss) + list(results) + lr, epoch, best_fitness, fi)
callbacks.run("on_train_end", last, best, epoch, results)
torch.cuda.empty_cache()
return results
def parse_opt(pre_model_path, coco_path, epochs, img_size, batch_size, known=False):
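    """
    Build the training options Namespace for the platform instead of parsing the CLI.

    The original YOLOv5 argparse definitions are kept below, commented out, as reference;
    the equivalent argparse.Namespace is constructed directly from the given values.
    The `known` flag is kept for signature compatibility and is unused.
    """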
    # parser = argparse.ArgumentParser()
    # pretrained weights file
    # parser.add_argument("--weights", type=str, default=pre_model_path, help="initial weights path")
    #
    # model config
    # parser.add_argument("--cfg", type=str, default="", help="model.yaml path")
    #
    # dataset config path
    # parser.add_argument("--data", type=str, default=coco_path, help="dataset.yaml path")
    #
    # hyperparameter YAML file
    # parser.add_argument("--hyp", type=str, default=ROOT / "data/hyps/hyp.scratch-low.yaml", help="hyperparameters path")
    #
    # number of training epochs
    # parser.add_argument("--epochs", type=int, default=epochs, help="total training epochs")
    #
    # training batch size
    # parser.add_argument("--batch-size", type=int, default=batch_size, help="total batch size for all GPUs, -1 for autobatch")
    #
    # train/val image size
    # parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=img_size, help="train, val image size (pixels)")
    #
    # rectangular training: group images with similar aspect ratios into a batch, since all images in a batch share one shape
    # parser.add_argument("--rect", action="store_true", help="rectangular training")
    #
    # resume training from the most recent run
    # parser.add_argument("--resume", nargs="?", const=True, default=False, help="resume most recent training")
    #
    # do not save intermediate checkpoints (default False, i.e. save)
    # parser.add_argument("--nosave", action="store_true", help="only save final checkpoint")
    #
    # if set, validate only after the final epoch; otherwise mAP is computed every epoch
    # parser.add_argument("--noval", action="store_true", help="only validate final epoch")
    #
    # disable AutoAnchor (default False, i.e. anchors are adjusted automatically)
    # parser.add_argument("--noautoanchor", action="store_true", help="disable AutoAnchor")
    #
    #
    # parser.add_argument("--noplots", action="store_true", help="save no plot files")
    #
    # hyperparameter evolution: tune hyperparameters with a genetic algorithm
    # parser.add_argument("--evolve", type=int, nargs="?", const=300, help="evolve hyperparameters for x generations")
    # parser.add_argument(
    #     "--evolve_population", type=str, default=ROOT / "data/hyps", help="location for loading population"
    # )
    # parser.add_argument("--resume_evolve", type=str, default=None, help="resume evolve from last generation")
    #
    # Google Cloud Storage bucket (unused here)
    # parser.add_argument("--bucket", type=str, default="", help="gsutil bucket")
    #
    # cache images in RAM/disk ahead of time to speed up training (default off)
    # parser.add_argument("--cache", type=str, nargs="?", const="ram", help="image --cache ram/disk")
    #
    # weighted image sampling (off by default)
    # parser.add_argument("--image-weights", action="store_true", help="use weighted image selection for training")
    #
    # device selection
    # parser.add_argument("--device", default="0", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
    #
    # multi-scale training
    # parser.add_argument("--multi-scale", action="store_true", help="vary img-size +/- 50%%")
    #
    # treat a multi-class dataset as single-class (off by default)
    # parser.add_argument("--single-cls", action="store_true", help="train multi-class data as single-class")
    #
    # optimizer choice; three optimizers are available
    # parser.add_argument("--optimizer", type=str, choices=["SGD", "Adam", "AdamW"], default="SGD", help="optimizer")
    #
    # cross-GPU synchronized BatchNorm, for DDP mode only
    # parser.add_argument("--sync-bn", action="store_true", help="use SyncBatchNorm, only available in DDP mode")
    #
    # max number of dataloader workers (multi-threaded image loading)
    # parser.add_argument("--workers", type=int, default=1, help="max dataloader workers (per RANK in DDP mode)")
    #
    # directory where training results are saved
    # parser.add_argument("--project", default=ROOT / "runs/train", help="save to project/name")
    #
    # run name for the results directory
    # parser.add_argument("--name", default="exp", help="save to project/name")
    #
    # allow an existing project/name directory without incrementing
    # parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
    #
    # quad dataloader: gives some of the benefits of training at a higher --img size while using a lower one
    # parser.add_argument("--quad", action="store_true", help="quad dataloader")
    #
    #
    # parser.add_argument("--cos-lr", action="store_true", help="cosine LR scheduler")
    #
    # label smoothing (off by default)
    # parser.add_argument("--label-smoothing", type=float, default=0.0, help="Label smoothing epsilon")
    #
    # early-stopping patience: stop after 100 epochs without improvement
    # parser.add_argument("--patience", type=int, default=100, help="EarlyStopping patience (epochs without improvement)")
    #
    # freeze layers for transfer learning
    # parser.add_argument("--freeze", nargs="+", type=int, default=[0], help="Freeze layers: backbone=10, first3=0 1 2")
    #
    # save a checkpoint every x epochs
    # parser.add_argument("--save-period", type=int, default=-1, help="Save checkpoint every x epochs (disabled if < 1)")
    # parser.add_argument("--seed", type=int, default=0, help="Global training seed")
    # parser.add_argument("--local_rank", type=int, default=-1, help="Automatic DDP Multi-GPU argument, do not modify")
    #
    # # Logger arguments
    # parser.add_argument("--entity", default=None, help="Entity")
    # parser.add_argument("--upload_dataset", nargs="?", const=True, default=False, help='Upload data, "val" option')
    # parser.add_argument("--bbox_interval", type=int, default=-1, help="Set bounding-box image logging interval")
    # parser.add_argument("--artifact_alias", type=str, default="latest", help="Version of dataset artifact to use")
    #
    # # NDJSON logging
    # parser.add_argument("--ndjson-console", action="store_true", help="Log ndjson to console")
    # parser.add_argument("--ndjson-file", action="store_true", help="Log ndjson to file")
    # build the Namespace object manually instead of parsing the CLI
    args = argparse.Namespace(
        weights=pre_model_path,  # pretrained weights file
        cfg="",  # model config (model.yaml)
        data=coco_path,  # dataset config (dataset.yaml)
        hyp=ROOT / "data/hyps/hyp.scratch-low.yaml",  # hyperparameter YAML file
        epochs=epochs,  # number of training epochs
        batch_size=batch_size,  # training batch size
        imgsz=img_size,  # train/val image size
        rect=False,  # rectangular training: group images with similar aspect ratios into a batch
        resume=False,  # resume training from the most recent run
        nosave=False,  # if True, save only the final checkpoint (default False, i.e. save all)
        noval=False,  # if True, validate only after the final epoch; otherwise mAP is computed every epoch
        noautoanchor=False,  # disable AutoAnchor (default False, i.e. anchors are adjusted automatically)
        noplots=False,
        evolve=None,  # hyperparameter evolution: tune hyperparameters with a genetic algorithm
        evolve_population=ROOT / "data/hyps",
        resume_evolve=None,
        bucket="",  # Google Cloud Storage bucket (unused here)
        cache=None,  # cache images in RAM/disk ahead of time to speed up training (default off)
        image_weights=False,  # weighted image sampling (off by default)
        device="0",  # device selection
        multi_scale=False,  # multi-scale training
        single_cls=False,  # treat a multi-class dataset as single-class (off by default)
        optimizer="SGD",  # optimizer choice (SGD, Adam, AdamW)
        sync_bn=False,  # cross-GPU synchronized BatchNorm, for DDP mode only
        workers=0,  # max number of dataloader workers (multi-threaded image loading)
        project=ROOT / "runs/train",
        name="exp",
        exist_ok=False,
        quad=False,
        cos_lr=False,
        label_smoothing=0.0,
        patience=100,
        freeze=[0],
        save_period=-1,
        seed=0,
        local_rank=-1,
        entity=None,
        upload_dataset=False,
        bbox_interval=-1,
        artifact_alias="latest",
        ndjson_console=False,
        ndjson_file=False,
    )
return args
def main(opt, callbacks=Callbacks(), training_id=None):
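    """Check options, handle resume/evolve logic, select the device, and launch training for training_id."""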
    if training_id is None:
        print("no training_id provided")
        return
"""
2.1检查分布式训练环境
"""
# if RANK in {-1, 0}: # 若进程编号为-1或0
#
# # 输出所有训练参数
# print_args(vars(opt))
# # 检查yolo v5的仓库是否更新若有更新的话给出提示
# check_git_status()
# # 检查requirements.txt所需要的包是否全部都满足
# check_requirements(ROOT / "requirements.txt")
"""
2.2判断是否断点重续
"""
# Resume (从指定的模型或者从最近的last.pt重续)
if opt.resume and not check_comet_resume(opt) and not opt.evolve:
last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
opt_yaml = last.parent.parent / "opt.yaml" # train options yaml
opt_data = opt.data # original dataset
if opt_yaml.is_file():
with open(opt_yaml, errors="ignore") as f:
d = yaml.safe_load(f)
else:
d = torch.load(last, map_location="cpu")["opt"]
opt = argparse.Namespace(**d) # replace
opt.cfg, opt.weights, opt.resume = "", str(last), True # reinstate
if is_url(opt_data):
opt.data = check_file(opt_data) # avoid HUB resume auth timeout
else:
        # when not resuming, read the relevant parameters from the given files
opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = (
check_file(opt.data),
check_yaml(opt.cfg),
check_yaml(opt.hyp),
str(opt.weights),
str(opt.project),
) # checks
        # error out if both the model config and the (pretrained) weights are empty
        assert len(opt.cfg) or len(opt.weights), "either --cfg or --weights must be specified"
        # if hyperparameter evolution is requested, rebuild the save path
        if opt.evolve:
            # set a new project output directory
            if opt.project == str(ROOT / "runs/train"):  # if default project name, rename to runs/evolve
                opt.project = str(ROOT / "runs/evolve")
opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume
if opt.name == "cfg":
opt.name = Path(opt.cfg).stem # use model.yaml as name
opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))
"""
2.3判断是否分布式训练
"""
# DDP mode 支持多级多卡分布式训练
# 选择程序装载的位置
device = select_device(opt.device, batch_size=opt.batch_size)
# if LOCAL_RANK != -1:
# msg = "is not compatible with YOLOv5 Multi-GPU DDP training"
# assert not opt.image_weights, f"--image-weights {msg}"
# assert not opt.evolve, f"--evolve {msg}"
# assert opt.batch_size != -1, f"AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size"
# assert opt.batch_size % WORLD_SIZE == 0, f"--batch-size {opt.batch_size} must be multiple of WORLD_SIZE"
# assert torch.cuda.device_count() > LOCAL_RANK, "insufficient CUDA devices for DDP command"
# torch.cuda.set_device(LOCAL_RANK)
# device = torch.device("cuda", LOCAL_RANK)
# dist.init_process_group(
# backend="nccl" if dist.is_nccl_available() else "gloo", timeout=timedelta(seconds=10800)
# )
"""
2.3判断是否进化训练
"""
# Train 训练模式如果不进行超参数进化则直接调用train()函数,开始训练
if not opt.evolve: # 如果不使用超参数进化
# 开始训练
train(training_id, opt.hyp, opt, device, callbacks)
    # Evolve hyperparameters (optional): genetic algorithm that evolves while training
    else:
        # Hyperparameter evolution metadata: (mutate this hyperparameter True/False, lower_limit, upper_limit)
meta = {
"lr0": (False, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
"lrf": (False, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
"momentum": (False, 0.6, 0.98), # SGD momentum/Adam beta1
"weight_decay": (False, 0.0, 0.001), # optimizer weight decay
"warmup_epochs": (False, 0.0, 5.0), # warmup epochs (fractions ok)
"warmup_momentum": (False, 0.0, 0.95), # warmup initial momentum
"warmup_bias_lr": (False, 0.0, 0.2), # warmup initial bias lr
"box": (False, 0.02, 0.2), # box loss gain
"cls": (False, 0.2, 4.0), # cls loss gain
"cls_pw": (False, 0.5, 2.0), # cls BCELoss positive_weight
"obj": (False, 0.2, 4.0), # obj loss gain (scale with pixels)
"obj_pw": (False, 0.5, 2.0), # obj BCELoss positive_weight
"iou_t": (False, 0.1, 0.7), # IoU training threshold
"anchor_t": (False, 2.0, 8.0), # anchor-multiple threshold
"anchors": (False, 2.0, 10.0), # anchors per output grid (0 to ignore)
"fl_gamma": (False, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
"hsv_h": (True, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
"hsv_s": (True, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
"hsv_v": (True, 0.0, 0.9), # image HSV-Value augmentation (fraction)
"degrees": (True, 0.0, 45.0), # image rotation (+/- deg)
"translate": (True, 0.0, 0.9), # image translation (+/- fraction)
"scale": (True, 0.0, 0.9), # image scale (+/- gain)
"shear": (True, 0.0, 10.0), # image shear (+/- deg)
"perspective": (True, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
"flipud": (True, 0.0, 1.0), # image flip up-down (probability)
"fliplr": (True, 0.0, 1.0), # image flip left-right (probability)
"mosaic": (True, 0.0, 1.0), # image mosaic (probability)
"mixup": (True, 0.0, 1.0), # image mixup (probability)
"copy_paste": (True, 0.0, 1.0), # segment copy-paste (probability)
}
# GA configs
pop_size = 50
mutation_rate_min = 0.01
mutation_rate_max = 0.5
crossover_rate_min = 0.5
crossover_rate_max = 1
min_elite_size = 2
max_elite_size = 5
tournament_size_min = 2
tournament_size_max = 10
with open(opt.hyp, errors="ignore") as f:
hyp = yaml.safe_load(f) # load hyps dict
if "anchors" not in hyp: # anchors commented in hyp.yaml
hyp["anchors"] = 3
if opt.noautoanchor:
del hyp["anchors"], meta["anchors"]
opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch
# ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
evolve_yaml, evolve_csv = save_dir / "hyp_evolve.yaml", save_dir / "evolve.csv"
if opt.bucket:
# download evolve.csv if exists
subprocess.run(
[
"gsutil",
"cp",
f"gs://{opt.bucket}/evolve.csv",
str(evolve_csv),
]
)
# Delete the items in meta dictionary whose first value is False
del_ = [item for item, value_ in meta.items() if value_[0] is False]
hyp_GA = hyp.copy() # Make a copy of hyp dictionary
for item in del_:
del meta[item] # Remove the item from meta dictionary
del hyp_GA[item] # Remove the item from hyp_GA dictionary
# Set lower_limit and upper_limit arrays to hold the search space boundaries
lower_limit = np.array([meta[k][1] for k in hyp_GA.keys()])
upper_limit = np.array([meta[k][2] for k in hyp_GA.keys()])
# Create gene_ranges list to hold the range of values for each gene in the population
gene_ranges = [(lower_limit[i], upper_limit[i]) for i in range(len(upper_limit))]
# Initialize the population with initial_values or random values
initial_values = []
# If resuming evolution from a previous checkpoint
if opt.resume_evolve is not None:
assert os.path.isfile(ROOT / opt.resume_evolve), "evolve population path is wrong!"
with open(ROOT / opt.resume_evolve, errors="ignore") as f:
evolve_population = yaml.safe_load(f)
for value in evolve_population.values():
value = np.array([value[k] for k in hyp_GA.keys()])
initial_values.append(list(value))
# If not resuming from a previous checkpoint, generate initial values from .yaml files in opt.evolve_population
else:
yaml_files = [f for f in os.listdir(opt.evolve_population) if f.endswith(".yaml")]
for file_name in yaml_files:
with open(os.path.join(opt.evolve_population, file_name)) as yaml_file:
value = yaml.safe_load(yaml_file)
value = np.array([value[k] for k in hyp_GA.keys()])
initial_values.append(list(value))
        # Generate random values within the search space for the rest of the population
        if not initial_values:  # no seed individuals were loaded (initial_values is a list, never None)
population = [generate_individual(gene_ranges, len(hyp_GA)) for _ in range(pop_size)]
elif pop_size > 1:
population = [generate_individual(gene_ranges, len(hyp_GA)) for _ in range(pop_size - len(initial_values))]
for initial_value in initial_values:
population = [initial_value] + population
# Run the genetic algorithm for a fixed number of generations
list_keys = list(hyp_GA.keys())
for generation in range(opt.evolve):
if generation >= 1:
save_dict = {}
for i in range(len(population)):
little_dict = {list_keys[j]: float(population[i][j]) for j in range(len(population[i]))}
save_dict[f"gen{str(generation)}number{str(i)}"] = little_dict
with open(save_dir / "evolve_population.yaml", "w") as outfile:
yaml.dump(save_dict, outfile, default_flow_style=False)
# Adaptive elite size
elite_size = min_elite_size + int((max_elite_size - min_elite_size) * (generation / opt.evolve))
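            # e.g. min_elite_size=2, max_elite_size=5: elites grow linearly from 2 toward 5 over the generations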
# Evaluate the fitness of each individual in the population
fitness_scores = []
for individual in population:
for key, value in zip(hyp_GA.keys(), individual):
hyp_GA[key] = value
hyp.update(hyp_GA)
            results = train(training_id, hyp.copy(), opt, device, callbacks)
callbacks = Callbacks()
# Write mutation results
keys = (
"metrics/precision",
"metrics/recall",
"metrics/mAP_0.5",
"metrics/mAP_0.5:0.95",
"val/box_loss",
"val/obj_loss",
"val/cls_loss",
)
print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket)
fitness_scores.append(results[2])
# Select the fittest individuals for reproduction using adaptive tournament selection
selected_indices = []
for _ in range(pop_size - elite_size):
# Adaptive tournament size
tournament_size = max(
max(2, tournament_size_min),
int(min(tournament_size_max, pop_size) - (generation / (opt.evolve / 10))),
)
# Perform tournament selection to choose the best individual
tournament_indices = random.sample(range(pop_size), tournament_size)
tournament_fitness = [fitness_scores[j] for j in tournament_indices]
winner_index = tournament_indices[tournament_fitness.index(max(tournament_fitness))]
selected_indices.append(winner_index)
# Add the elite individuals to the selected indices
elite_indices = [i for i in range(pop_size) if fitness_scores[i] in sorted(fitness_scores)[-elite_size:]]
selected_indices.extend(elite_indices)
# Create the next generation through crossover and mutation
next_generation = []
for _ in range(pop_size):
parent1_index = selected_indices[random.randint(0, pop_size - 1)]
parent2_index = selected_indices[random.randint(0, pop_size - 1)]
# Adaptive crossover rate
crossover_rate = max(
crossover_rate_min, min(crossover_rate_max, crossover_rate_max - (generation / opt.evolve))
)
if random.uniform(0, 1) < crossover_rate:
crossover_point = random.randint(1, len(hyp_GA) - 1)
child = population[parent1_index][:crossover_point] + population[parent2_index][crossover_point:]
else:
child = population[parent1_index]
# Adaptive mutation rate
mutation_rate = max(
mutation_rate_min, min(mutation_rate_max, mutation_rate_max - (generation / opt.evolve))
)
for j in range(len(hyp_GA)):
if random.uniform(0, 1) < mutation_rate:
child[j] += random.uniform(-0.1, 0.1)
child[j] = min(max(child[j], gene_ranges[j][0]), gene_ranges[j][1])
next_generation.append(child)
            # Replace the old population with the new generation, keeping the evaluated one
            evaluated_population = population  # the generation that fitness_scores were computed for
            population = next_generation
        # Print the best solution found in the last evaluated generation
        best_index = fitness_scores.index(max(fitness_scores))
        best_individual = evaluated_population[best_index]
        print("Best solution found:", best_individual)
# Plot results
plot_evolve(evolve_csv)
LOGGER.info(
f"Hyperparameter evolution finished {opt.evolve} generations\n"
f"Results saved to {colorstr('bold', save_dir)}\n"
f"Usage example: $ python train.py --hyp {evolve_yaml}"
)
def generate_individual(input_ranges, individual_length):
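    """Generate a random individual: one uniform sample per gene within its (lower, upper) range."""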
individual = []
for i in range(individual_length):
lower_bound, upper_bound = input_ranges[i]
individual.append(random.uniform(lower_bound, upper_bound))
return individual
def run(pre_model_path, coco_path, epochs, img_size, batch_size, training_id=None, **kwargs):
    """Programmatic entry point; extra keyword arguments override the parsed options."""
    opt = parse_opt(pre_model_path, coco_path, epochs, img_size, batch_size)
    for k, v in kwargs.items():
        setattr(opt, k, v)
    main(opt, training_id=training_id)
    return opt
def really_training(training_id, pre_model_path, coco_path, epochs, img_size, batch_size):
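    """Platform entry point: build training options from the given arguments and run main() under training_id."""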
opt = parse_opt(pre_model_path, coco_path, epochs, img_size, batch_size)
main(opt, training_id=training_id)
return True
if __name__ == "__main__":
    # local smoke test; the leading 1 is an assumed placeholder training_id, since
    # really_training expects a training_id before the weights/dataset paths
    really_training(1, "D:\\Project\\AIData\\model_factory\\yolov5n.pt",
                    "D:\\Project\\AIData\\lzz\\APEX测试数据集\\Detection\\coco8.yaml", 10, 320, 16)