2025

Ranjan Sapkota; Marco Flores-Calero; Rizwan Qureshi; Chetan Badgujar; Upesh Nepal; Alwin Poulose; Peter Zeno; Uday Bhanu Prakash Vaddevolu; Sheheryar Khan; Maged Shoman; Hong Yan; Manoj Karkee
YOLO advances to its genesis: a decadal and comprehensive review of the You Only Look Once (YOLO) series Journal Article
In: Artificial Intelligence Review, vol. 58, no. 9, pp. 274, 2025, ISSN: 1573-7462.
Abstract | Links | BibTeX | Tags: Agriculture, Artificial intelligence, Autonomous vehicles, CNN, Computer vision, Deep learning, Healthcare and medical imaging, Industrial manufacturing, Real-time object detection, Surveillance, Traffic safety, YOLO, YOLO configurations, YOLOv1 to YOLOv12, You Only Look Once
@article{sapkota_yolo_2025,
title = {{YOLO} advances to its genesis: a decadal and comprehensive review of the {You Only Look Once} ({YOLO}) series},
author = {Sapkota, Ranjan and Flores-Calero, Marco and Qureshi, Rizwan and Badgujar, Chetan and Nepal, Upesh and Poulose, Alwin and Zeno, Peter and Vaddevolu, Uday Bhanu Prakash and Khan, Sheheryar and Shoman, Maged and Yan, Hong and Karkee, Manoj},
doi = {10.1007/s10462-025-11253-3},
url = {https://doi.org/10.1007/s10462-025-11253-3},
issn = {1573-7462},
year = {2025},
date = {2025-06-01},
urldate = {2025-06-01},
journal = {Artificial Intelligence Review},
volume = {58},
number = {9},
pages = {274},
abstract = {This review systematically examines the progression of the You Only Look Once (YOLO) object detection algorithms from YOLOv1 to the recently unveiled YOLOv12. Employing a reverse chronological analysis, this study examines the advancements introduced by YOLO algorithms, beginning with YOLOv12 and progressing through YOLO11 (or YOLOv11), YOLOv10, YOLOv9, YOLOv8, and subsequent versions to explore each version’s contributions to enhancing speed, detection accuracy, and computational efficiency in real-time object detection. Additionally, this study reviews the alternative versions derived from YOLO architectural advancements of YOLO-NAS, YOLO-X, YOLO-R, DAMO-YOLO, and Gold-YOLO. Moreover, the study highlights the transformative impact of YOLO models across five critical application areas: autonomous vehicles and traffic safety, healthcare and medical imaging, industrial manufacturing, surveillance and security, and agriculture. By detailing the incremental technological advancements in subsequent YOLO versions, this review chronicles the evolution of YOLO, and discusses the challenges and limitations in each of the earlier versions. The evolution signifies a path towards integrating YOLO with multimodal, context-aware, and Artificial General Intelligence (AGI) systems for the next YOLO decade, promising significant implications for future developments in AI-driven applications.},
keywords = {Agriculture, Artificial intelligence, Autonomous vehicles, CNN, Computer vision, Deep learning, Healthcare and medical imaging, Industrial manufacturing, Real-time object detection, Surveillance, Traffic safety, YOLO, YOLO configurations, YOLOv1 to YOLOv12, You Only Look Once},
pubstate = {published},
tppubtype = {article}
}

Ranjan Sapkota; Manoj Karkee
Improved YOLOv12 with LLM-Generated Synthetic Data for Enhanced Apple Detection and Benchmarking Against YOLOv11 and YOLOv10 Journal Article
In: IFAC-PapersOnLine, vol. 59, no. 23, pp. 239–244, 2025, ISSN: 2405-8963.
Abstract | Links | BibTeX | Tags: agricultural automation, Apple detection, LLM, Synthetic Image Generation, YOLOv10, YOLOv11, YOLOv12, YOLOv12 object detection, You Only Look Once
@article{sapkota_improved_2025,
title = {Improved {YOLOv12} with {LLM}-Generated Synthetic Data for Enhanced Apple Detection and Benchmarking Against {YOLOv11} and {YOLOv10}},
author = {Sapkota, Ranjan and Karkee, Manoj},
url = {https://www.sciencedirect.com/science/article/pii/S2405896325024929},
doi = {10.1016/j.ifacol.2025.11.793},
issn = {2405-8963},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IFAC-PapersOnLine},
volume = {59},
number = {23},
pages = {239--244},
abstract = {Accurate fruit detection in orchards remains a major bottleneck in agricultural automation due to the high cost and labor demands of data collection and annotation. This study evaluated the performance of the YOLOv12 object detection model, and compared against the performances YOLOv11 and YOLOv10 for apple detection in commercial orchards based on the model training completed entirely on synthetic images generated by Large Language Models (LLMs). The YOLOv12n configuration achieved the highest precision at 0.916, the highest recall at 0.969, and the highest mean Average Precision (mAP@50) at 0.978. In comparison, the YOLOv11 series was led by YOLO11x, which achieved the highest precision at 0.857, recall at 0.85, and mAP@50 at 0.91. For the YOLOv10 series, YOLOv10b and YOLOv10l both achieved the highest precision at 0.85, with YOLOv10n achieving the highest recall at 0.8 and mAP@50 at 0.89. These findings demonstrated that YOLOv12, when trained on realistic LLM-generated datasets surpassed its predecessors in key performance metrics. The technique also offered a cost-effective solution by reducing the need for extensive manual data collection in the agricultural field. In addition, this study compared the computational efficiency of all versions of YOLOv12, v11 and v10, where YOLOv11n reported the lowest inference time at 4.7 ms, compared to YOLOv12n’s 5.6 ms and YOLOv10n’s 5.9 ms. Although YOLOv12 is new and more accurate than YOLOv11, and YOLOv10, YOLO11n still stays the fastest YOLO model among YOLOv10, YOLOv11 and YOLOv12 series of models.},
keywords = {agricultural automation, Apple detection, LLM, Synthetic Image Generation, YOLOv10, YOLOv11, YOLOv12, YOLOv12 object detection, You Only Look Once},
pubstate = {published},
tppubtype = {article}
}
