2 changes: 1 addition & 1 deletion .github/workflows/deploy.yaml
@@ -61,7 +61,7 @@ jobs:
run: |
python yolo/lazy.py task=inference use_wandb=False
python yolo/lazy.py task=inference use_wandb=False model=v7
python yolo/lazy.py task=inference use_wandb=False +quite=True
python yolo/lazy.py task=inference use_wandb=False +quiet=True
python yolo/lazy.py task=inference use_wandb=False name=AnyNameYouWant
python yolo/lazy.py task=inference use_wandb=False image_size=\[480,640]
python yolo/lazy.py task=inference use_wandb=False task.nms.min_confidence=0.1
4 changes: 1 addition & 3 deletions README.md
@@ -2,12 +2,10 @@

[![Documentation Status](https://readthedocs.org/projects/yolo-docs/badge/?version=latest)](https://yolo-docs.readthedocs.io/en/latest/?badge=latest)
![GitHub License](https://img.shields.io/github/license/WongKinYiu/YOLO)
![WIP](https://img.shields.io/badge/status-WIP-orange)

[![Developer Mode Build & Test](https://github.com/WongKinYiu/YOLO/actions/workflows/develop.yaml/badge.svg)](https://github.com/WongKinYiu/YOLO/actions/workflows/develop.yaml)
[![Deploy Mode Validation & Inference](https://github.com/WongKinYiu/YOLO/actions/workflows/deploy.yaml/badge.svg)](https://github.com/WongKinYiu/YOLO/actions/workflows/deploy.yaml)

[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/yolov9-learning-what-you-want-to-learn-using/real-time-object-detection-on-coco)](https://paperswithcode.com/sota/real-time-object-detection-on-coco)

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)]()
[![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-green)](https://huggingface.co/spaces/henry000/YOLO)
@@ -87,7 +85,7 @@ python yolo/lazy.py task=inference \ # default is inference
task.nms.min_confidence=0.1 \ # nms config
task.fast_inference=onnx \ # onnx, trt, deploy
task.data.source=data/toy/images/train \ # file, dir, webcam
+quite=True \ # Quite Output
+quiet=True \ # Quiet Output
yolo task.data.source={Any Source} # if pip installed
yolo task=inference task.data.source={Any}
```
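A side note on the fixed flag: the `+` prefix in `+quiet=True` is Hydra syntax for adding a key that does not exist in the base config. Below is a minimal sketch of the same override through Hydra's compose API, assuming the repo's `yolo/config` tree (the tests in this PR construct configs the same way):

```python
from hydra import compose, initialize

# "+quiet=True" adds a new key; a plain "quiet=True" override would fail
# because the base config has no `quiet` entry. Note that config_path is
# resolved relative to the calling file.
with initialize(config_path="yolo/config", version_base=None):
    cfg = compose(config_name="config", overrides=["task=inference", "+quiet=True"])
    print(cfg.quiet)  # True
```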
2 changes: 1 addition & 1 deletion docs/HOWTO.md
@@ -17,7 +17,7 @@ python yolo/lazy.py task=validation dataset=toy name=validation
# Inference
python yolo/lazy.py task=inference
python yolo/lazy.py task=inference device=cpu
python yolo/lazy.py task=inference +quite=True
python yolo/lazy.py task=inference +quiet=True
python yolo/lazy.py task=inference name=AnyNameYouWant
python yolo/lazy.py task=inference image_size=\[480,640]
python yolo/lazy.py task=inference task.nms.min_confidence=0.1
1 change: 1 addition & 0 deletions tests/conftest.py
@@ -68,6 +68,7 @@ def model_v7(inference_v7_cfg: Config, device) -> YOLO:
@pytest.fixture(scope="session")
def solver(train_cfg: Config) -> Trainer:
train_cfg.use_wandb = False
del train_cfg.task.data.equivalent_batch_size
callbacks, loggers, save_path = setup(train_cfg)
trainer = Trainer(
accelerator="auto",
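The `del` is what keeps the new callback out of this session-scoped fixture: `setup()` in yolo/utils/logging_utils.py (later in this diff) appends `GradientAccumulation` only when `cfg.task.data` still carries the attribute. A tiny demonstration of that gate, with `SimpleNamespace` standing in for the real config object:

```python
from types import SimpleNamespace

data = SimpleNamespace(batch_size=16, equivalent_batch_size=64)
del data.equivalent_batch_size  # what the fixture does
# The hasattr check in setup() is now False, so the
# GradientAccumulation callback is never registered.
assert not hasattr(data, "equivalent_batch_size")
```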
2 changes: 1 addition & 1 deletion tests/test_utils/test_bounding_box_utils.py
@@ -118,7 +118,7 @@ def test_vec2box_autoanchor():
with initialize(config_path="../../yolo/config", version_base=None):
cfg: Config = compose(config_name="config", overrides=["model=v9-m"])
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = create_model(cfg.model, weight_path=None).to(device)
model = create_model(cfg.model, weight_path=None)
vec2box = Vec2Box(model, cfg.model.anchor, cfg.image_size, device)
assert vec2box.strides == [8, 16, 32]

2 changes: 2 additions & 0 deletions yolo/config/config.py
@@ -60,6 +60,7 @@ class DataConfig:
data_augment: Dict[str, int]
source: Optional[Union[str, int]]
dynamic_shape: Optional[bool]
equivalent_batch_size: Optional[int] = 64


@dataclass
@@ -144,6 +145,7 @@ class Config:
model: ModelConfig
name: str

accelerator: Optional[str]
device: Union[str, int, List[int]]
cpu_num: int

3 changes: 2 additions & 1 deletion yolo/config/general.yaml
@@ -1,4 +1,5 @@
device: 0
accelerator: auto
device: auto
cpu_num: 16

image_size: [640, 640]
1 change: 1 addition & 0 deletions yolo/config/task/train.yaml
@@ -7,6 +7,7 @@ epoch: 500

data:
batch_size: 16
equivalent_batch_size: 64
image_size: ${image_size}
cpu_num: ${cpu_num}
shuffle: True
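With these defaults on a single device, the new callback (added in yolo/utils/model_utils.py below) settles on accumulating 4 batches per optimizer step. A quick sketch of the arithmetic used in `GradientAccumulation.setup()`, assuming `world_size == 1`:

```python
# Mirrors the formula in GradientAccumulation.setup(); world_size == 1 assumed.
equivalent_batch_size, batch_size, world_size = 64, 16, 1  # values from this config
effective_batch_size = batch_size * world_size
max_accumulation = max(1, round(equivalent_batch_size / effective_batch_size))
print(max_accumulation)  # 4 batches accumulated per optimizer step after warmup
```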
6 changes: 4 additions & 2 deletions yolo/lazy.py
@@ -17,16 +17,18 @@ def main(cfg: Config):
callbacks, loggers, save_path = setup(cfg)

trainer = Trainer(
accelerator="auto",
accelerator=getattr(cfg, "accelerator", "auto"),
devices=cfg.device,
max_epochs=getattr(cfg.task, "epoch", None),
precision="16-mixed",
callbacks=callbacks,
sync_batchnorm=True,
logger=loggers,
log_every_n_steps=1,
gradient_clip_val=10,
gradient_clip_algorithm="norm",
deterministic=True,
enable_progress_bar=not getattr(cfg, "quite", False),
enable_progress_bar=not getattr(cfg, "quiet", False),
default_root_dir=save_path,
)

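Both new `Trainer` arguments degrade gracefully when the keys are absent from the config. A small sketch of the fallback behavior, using a plain stand-in for `cfg` (the real object is an OmegaConf config, where the same `getattr` pattern applies):

```python
from types import SimpleNamespace

cfg = SimpleNamespace(device="auto")              # no accelerator / quiet keys
print(getattr(cfg, "accelerator", "auto"))        # "auto": the fallback
print(not getattr(cfg, "quiet", False))           # True: progress bar enabled

cfg = SimpleNamespace(device="auto", quiet=True)  # e.g. set via +quiet=True
print(not getattr(cfg, "quiet", False))           # False: progress bar disabled
```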
22 changes: 12 additions & 10 deletions yolo/utils/logging_utils.py
@@ -38,7 +38,7 @@
from yolo.config.config import Config, YOLOLayer
from yolo.model.yolo import YOLO
from yolo.utils.logger import logger
from yolo.utils.model_utils import EMA
from yolo.utils.model_utils import EMA, GradientAccumulation
from yolo.utils.solver_utils import make_ap_table


@@ -68,7 +68,6 @@ def _init_progress(self, trainer: "Trainer") -> None:
self._reset_progress_bar_ids()
reconfigure(**self._console_kwargs)
self._console = Console()
self._console.clear_live()
self.progress = YOLOCustomProgress(
*self.configure_columns(trainer),
auto_refresh=False,
@@ -105,7 +104,7 @@ def on_train_batch_end(self, trainer, pl_module, outputs, batch: Any, batch_idx:
self._update(self.train_progress_bar_id, batch_idx + 1)
self._update_metrics(trainer, pl_module)
epoch_descript = "[cyan]Train [white]|"
batch_descript = "[green]Train [white]|"
batch_descript = "[green]Batch [white]|"
metrics = self.get_metrics(trainer, pl_module)
metrics.pop("v_num")
for metrics_name, metrics_val in metrics.items():
@@ -238,7 +237,7 @@ def on_validation_batch_end(self, trainer: Trainer, pl_module, outputs, batch, b
logger.log_image("Prediction", images, step=step, boxes=[log_bbox(pred_boxes)])


def setup_logger(logger_name, quite=False):
def setup_logger(logger_name, quiet=False):
class EmojiFormatter(logging.Formatter):
def format(self, record, emoji=":high_voltage:"):
return f"{emoji} {super().format(record)}"
@@ -249,17 +248,17 @@ def format(self, record, emoji=":high_voltage:"):
if rich_logger:
rich_logger.handlers.clear()
rich_logger.addHandler(rich_handler)
if quite:
if quiet:
rich_logger.setLevel(logging.ERROR)

coco_logger = logging.getLogger("faster_coco_eval.core.cocoeval")
coco_logger.setLevel(logging.ERROR)


def setup(cfg: Config):
quite = hasattr(cfg, "quite")
setup_logger("lightning.fabric", quite=quite)
setup_logger("lightning.pytorch", quite=quite)
quiet = hasattr(cfg, "quiet")
setup_logger("lightning.fabric", quiet=quiet)
setup_logger("lightning.pytorch", quiet=quiet)

def custom_wandb_log(string="", level=int, newline=True, repeat=True, prefix=True, silent=False):
if silent:
@@ -273,9 +272,12 @@ def custom_wandb_log(string="", level=int, newline=True, repeat=True, prefix=Tru

progress, loggers = [], []

if cfg.task.task == "train" and hasattr(cfg.task.data, "equivalent_batch_size"):
progress.append(GradientAccumulation(data_cfg=cfg.task.data, scheduler_cfg=cfg.task.scheduler))

if hasattr(cfg.task, "ema") and cfg.task.ema.enable:
progress.append(EMA(cfg.task.ema.decay))
if quite:
if quiet:
logger.setLevel(logging.ERROR)
return progress, loggers, save_path

@@ -336,7 +338,7 @@ def validate_log_directory(cfg: Config, exp_name: str) -> Path:
)

save_path.mkdir(parents=True, exist_ok=True)
if not getattr(cfg, "quite", False):
if not getattr(cfg, "quiet", False):
logger.info(f"📄 Created log folder: [blue b u]{save_path}[/]")
logger.addHandler(FileHandler(save_path / "output.log"))
return save_path
44 changes: 43 additions & 1 deletion yolo/utils/model_utils.py
@@ -13,7 +13,13 @@
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR, SequentialLR, _LRScheduler

from yolo.config.config import IDX_TO_ID, NMSConfig, OptimizerConfig, SchedulerConfig
from yolo.config.config import (
IDX_TO_ID,
DataConfig,
NMSConfig,
OptimizerConfig,
SchedulerConfig,
)
from yolo.model.yolo import YOLO
from yolo.utils.bounding_box_utils import Anc2Box, Vec2Box, bbox_nms, transform_bbox
from yolo.utils.logger import logger
@@ -44,6 +50,7 @@ def __init__(self, decay: float = 0.9999, tau: float = 2000):
self.decay = decay
self.tau = tau
self.step = 0
self.batch_step_counter = 0
self.ema_state_dict = None

def setup(self, trainer, pl_module, stage):
@@ -53,18 +60,53 @@ def setup(self, trainer, pl_module, stage):
param.requires_grad = False

def on_validation_start(self, trainer: "Trainer", pl_module: "LightningModule"):
self.batch_step_counter = 0
if self.ema_state_dict is None:
self.ema_state_dict = deepcopy(pl_module.model.state_dict())
pl_module.ema.load_state_dict(self.ema_state_dict)

@no_grad()
def on_train_batch_end(self, trainer: "Trainer", pl_module: "LightningModule", *args, **kwargs) -> None:
self.batch_step_counter += 1
if self.batch_step_counter % trainer.accumulate_grad_batches:
return
self.step += 1
decay_factor = self.decay * (1 - exp(-self.step / self.tau))
for key, param in pl_module.model.state_dict().items():
self.ema_state_dict[key] = lerp(param.detach(), self.ema_state_dict[key], decay_factor)
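Two things worth noting here: the new `batch_step_counter` gate keeps EMA updates aligned with optimizer steps when gradient accumulation is active, and the `tau` term warms the EMA in, starting near 0 (the EMA tracks the raw weights) and approaching the configured `decay` as `step` grows. A quick numeric check of the decay formula:

```python
from math import exp

decay, tau = 0.9999, 2000  # defaults from EMA.__init__
for step in (1, 100, 2000, 10000):
    decay_factor = decay * (1 - exp(-step / tau))
    print(step, round(decay_factor, 4))
# 1      0.0005  -> EMA follows the model almost exactly at the start
# 100    0.0488
# 2000   0.6321
# 10000  0.9932  -> approaches the configured decay
```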


class GradientAccumulation(Callback):
def __init__(self, data_cfg: DataConfig, scheduler_cfg: SchedulerConfig):
super().__init__()
self.equivalent_batch_size = data_cfg.equivalent_batch_size
self.actual_batch_size = data_cfg.batch_size
self.warmup_epochs = getattr(scheduler_cfg.warmup, "epochs", 0)
self.current_batch = 0
self.max_accumulation = 1
self.warmup_batches = 0
logger.info(":arrows_counterclockwise: Enable Gradient Accumulation")
Comment on lines +79 to +88

Copilot AI (Dec 30, 2025):

The GradientAccumulation class lacks a docstring explaining its purpose, parameters, and behavior. Consider adding documentation to describe how this callback manages gradient accumulation during training warmup.
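A docstring along these lines would address the feedback; this is only a suggested sketch paraphrasing the behavior visible in the diff, not code from the PR:

```python
# Illustration only: `Callback` is a local stand-in for Lightning's class so
# the snippet runs on its own; the docstring is the suggested addition.
class Callback:
    pass


class GradientAccumulation(Callback):
    """Ramp `trainer.accumulate_grad_batches` up during scheduler warmup.

    The target factor is max(1, round(equivalent_batch_size /
    (batch_size * world_size))). During the warmup epochs the factor is
    interpolated from 1 toward that target; afterwards it stays constant.

    Args:
        data_cfg: supplies `equivalent_batch_size` and `batch_size`.
        scheduler_cfg: its `warmup.epochs` sets the ramp length
            (treated as 0 when absent).
    """
```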

def setup(self, trainer: "Trainer", pl_module: "LightningModule", stage: str) -> None:
effective_batch_size = self.actual_batch_size * trainer.world_size
self.max_accumulation = max(1, round(self.equivalent_batch_size / effective_batch_size))
batches_per_epoch = int(len(pl_module.train_loader) / trainer.world_size)
self.warmup_batches = int(self.warmup_epochs * batches_per_epoch)

def on_train_epoch_start(self, trainer: "Trainer", pl_module: "LightningModule") -> None:
self.current_batch = trainer.global_step

def on_train_batch_start(self, trainer: "Trainer", pl_module: "LightningModule", *args, **kwargs) -> None:
if self.current_batch < self.warmup_batches:
current_accumulation = round(lerp(1, self.max_accumulation, self.current_batch, self.warmup_batches))
else:
current_accumulation = self.max_accumulation
trainer.accumulate_grad_batches = current_accumulation

def on_train_batch_end(self, trainer: "Trainer", pl_module: "LightningModule", *args, **kwargs) -> None:
self.current_batch += 1

Comment on lines +79 to +108

Copilot AI (Dec 30, 2025):

The new GradientAccumulation callback class lacks test coverage. Consider adding tests to verify the gradient accumulation logic, warmup behavior, and integration with the trainer's accumulate_grad_batches setting.

Member Author:

@copilot open a new pull request to apply changes based on this feedback

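In the spirit of that follow-up, a unit test could look roughly like the sketch below. It drives the callback with `SimpleNamespace` stand-ins instead of a real Lightning `Trainer`, sticks to the post-warmup path, and assumes the import path `yolo.utils.model_utils`:

```python
from types import SimpleNamespace

from yolo.utils.model_utils import GradientAccumulation


def test_gradient_accumulation_after_warmup():
    data_cfg = SimpleNamespace(equivalent_batch_size=64, batch_size=16)
    scheduler_cfg = SimpleNamespace(warmup=SimpleNamespace(epochs=1))
    callback = GradientAccumulation(data_cfg, scheduler_cfg)

    trainer = SimpleNamespace(world_size=1, global_step=0, accumulate_grad_batches=1)
    pl_module = SimpleNamespace(train_loader=[None] * 10)  # 10 batches per epoch

    callback.setup(trainer, pl_module, stage="fit")
    assert callback.max_accumulation == 4  # round(64 / (16 * 1))
    assert callback.warmup_batches == 10   # 1 warmup epoch * 10 batches

    # Past warmup, every batch should request the full accumulation factor.
    callback.current_batch = callback.warmup_batches
    callback.on_train_batch_start(trainer, pl_module)
    assert trainer.accumulate_grad_batches == 4
```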

def create_optimizer(model: YOLO, optim_cfg: OptimizerConfig) -> Optimizer:
"""Create an optimizer for the given model parameters based on the configuration.
