2026

Konstantinos I. Roumeliotis; Ranjan Sapkota; Manoj Karkee; Nikolaos D. Tselikas
Agentic AI With Orchestrator-Agent Trust: A Modular Visual Classification Framework With Trust-Aware Orchestration and RAG-Based Reasoning Journal Article
In: IEEE Access, vol. 14, pp. 26965–26982, 2026, ISSN: 2169-3536.
Abstract | Links | BibTeX | Tags: Accuracy, Adaptation models, Agentic AI, Artificial intelligence, Calibration, Cognition, Costs, orchestrator agent trust, Retrieval augmented generation, retrieval augmented reasoning, Training, trust orchestration, visual classification, Visualization
@article{roumeliotis_agentic_2026,
title = {Agentic {AI} With Orchestrator-Agent Trust: A Modular Visual Classification Framework With Trust-Aware Orchestration and {RAG}-Based Reasoning},
author = {Konstantinos I. Roumeliotis and Ranjan Sapkota and Manoj Karkee and Nikolaos D. Tselikas},
url = {https://ieeexplore.ieee.org/document/11373381/},
doi = {10.1109/ACCESS.2026.3662282},
issn = {2169-3536},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {IEEE Access},
volume = {14},
pages = {26965--26982},
abstract = {Modern Artificial Intelligence (AI) increasingly relies on multi-agent architectures that blend visual and language understanding. Yet, a pressing challenge remains: How can we trust these agents especially in zero-shot settings with no fine-tuning? We introduce a novel modular Agentic AI visual classification framework that integrates generalist multimodal agents with a non-visual reasoning orchestrator and a Retrieval-Augmented Generation (RAG) module. Applied to apple leaf disease diagnosis, we benchmark three configurations: (I) zero-shot with confidence-based orchestration, (II) fine-tuned agents with improved performance, and (III) trust-calibrated orchestration enhanced by CLIP-based image retrieval and re-evaluation loops. Using confidence calibration metrics (ECE, OCR, CCC), the orchestrator modulates trust across agents. Our results demonstrate a 77.94\% accuracy improvement in the zero-shot setting using trust-aware orchestration and RAG, achieving 85.63\% overall. GPT-4o showed better calibration, while Qwen-2.5-VL displayed overconfidence. Furthermore, image-RAG grounded predictions with visually similar cases, enabling correction of agent overconfidence via iterative re-evaluation. The proposed system separates perception (vision agents) from meta-reasoning (orchestrator), enabling scalable and interpretable multi-agent AI. This blueprint illustrates how Agentic AI can deliver trustworthy, modular, and transparent reasoning, and is extensible to diagnostics, biology, and other trust-critical domains. In doing so, we highlight Agentic AI not just as an architecture but as a paradigm for building reliable multi-agent intelligence. All models, prompts, results, and system components including the complete software source code are openly released to support reproducibility, transparency, and community benchmarking at our Github page.},
keywords = {Accuracy, Adaptation models, Agentic AI, Artificial intelligence, Calibration, Cognition, Costs, orchestrator agent trust, Retrieval augmented generation, retrieval augmented reasoning, Training, trust orchestration, visual classification, Visualization},
pubstate = {published},
tppubtype = {article}
}

Ranjan Sapkota; Konstantinos I. Roumeliotis; Manoj Karkee
AI Agents vs. Agentic AI: A Conceptual taxonomy, applications and challenges Journal Article
In: Information Fusion, vol. 126, pp. 103599, 2026, ISSN: 1566-2535.
Abstract | Links | BibTeX | Tags: Agentic AI, AI agents, Conceptual taxonomy, Context awareness, Multi-agent systems
@article{sapkota_ai_2026,
title = {{AI} Agents vs. Agentic {AI}: A Conceptual taxonomy, applications and challenges},
author = {Ranjan Sapkota and Konstantinos I. Roumeliotis and Manoj Karkee},
url = {https://www.sciencedirect.com/science/article/pii/S1566253525006712},
doi = {10.1016/j.inffus.2025.103599},
issn = {1566-2535},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Information Fusion},
volume = {126},
pages = {103599},
abstract = {Information fusion, in the context of the Generative AI era, must distinguish AI Agents from Agentic AI. This review critically distinguishes between AI Agents and Agentic AI, offering a structured, conceptual taxonomy, application mapping, and analysis of opportunities and challenges to clarify their divergent design philosophies and capabilities. We begin by outlining the search strategy and foundational definitions, characterizing AI Agents as modular systems driven and enabled by LLMs and LIMs for task-specific automation. Generative AI is positioned as a precursor providing the foundation, with AI agents advancing through tool integration, prompt engineering, and reasoning enhancements. We then characterize Agentic AI systems, which, in contrast to AI Agents, represent a paradigm shift marked by multi-agent collaboration, dynamic task decomposition, persistent memory, and coordinated autonomy. Through a chronological evaluation of architectural evolution, operational mechanisms, interaction styles, and autonomy levels, we present a comparative analysis across both AI agents and agentic AI paradigms. Application domains enabled by AI Agents such as customer support, scheduling, and data summarization are then contrasted with Agentic AI deployments in research automation, robotic coordination, and medical decision support. We further examine unique challenges in each paradigm including hallucination, brittleness, emergent behavior, and coordination failure, and propose targeted solutions such as ReAct loops, retrieval-augmented generation (RAG), automation coordination layers, and causal modeling. This work aims to provide a roadmap for developing robust, scalable, and explainable AI-driven systems.},
keywords = {Agentic AI, AI agents, Conceptual taxonomy, Context awareness, Multi-agent systems},
pubstate = {published},
tppubtype = {article}
}

Shaina Raza; Ranjan Sapkota; Manoj Karkee; Christos Emmanouilidis
TRiSM for Agentic AI: A review of Trust, Risk, and Security Management in LLM-based Agentic Multi-Agent Systems Journal Article
In: AI Open, vol. 7, pp. 71–95, 2026, ISSN: 2666-6510.
Abstract | Links | BibTeX | Tags: Adversarial robustness, Agentic AI, AI agents, AI governance, AI safety, Application security, Explainability, Human-in-the-Loop, LLM-based multi-agent systems, Model Privacy, ModelOps, Privacy-preserving AI, Risk management, TRiSM, Trustworthy AI
@article{raza_trism_2026,
title = {{TRiSM} for Agentic {AI}: A review of Trust, Risk, and Security Management in {LLM}-based Agentic Multi-Agent Systems},
author = {Shaina Raza and Ranjan Sapkota and Manoj Karkee and Christos Emmanouilidis},
url = {https://www.sciencedirect.com/science/article/pii/S2666651026000069},
doi = {10.1016/j.aiopen.2026.02.006},
issn = {2666-6510},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {AI Open},
volume = {7},
pages = {71--95},
abstract = {Agentic AI systems, built upon large language models (LLMs) and deployed in multi-agent configurations, are redefining intelligence, autonomy, collaboration, and decision-making across enterprise and societal domains. This review presents a structured analysis of Trust, Risk, and Security Management (TRiSM) in the context of LLM-based Agentic Multi-Agent Systems (AMAS). We begin by examining the conceptual foundations of Agentic AI and highlight its architectural distinctions from traditional AI agents. We then adapt and extend the AI TRiSM framework for Agentic AI, structured around key pillars: Explainability, ModelOps, Security, Privacy and their Lifecycle Governance, each contextualized to the challenges of AMAS. A risk taxonomy is proposed to capture the unique threats and vulnerabilities of Agentic AI, ranging from coordination failures to prompt-based adversarial manipulation. To make coordination and tool use measurable in practice, we propose two metrics: the Component Synergy Score (CSS), which captures inter-agent enablement, and the Tool Utilization Efficacy (TUE), which evaluates whether tools are invoked correctly and efficiently. We further discuss strategies for improving explainability in Agentic AI, as well as approaches to enhancing security and privacy through encryption, adversarial robustness, and regulatory compliance. The review concludes with a research roadmap for the responsible development and deployment of Agentic AI, highlighting key directions to align emerging systems with TRiSM principles-ensuring safety, transparency, and accountability in their operation.},
keywords = {Adversarial robustness, Agentic AI, AI agents, AI governance, AI safety, Application security, Explainability, Human-in-the-Loop, LLM-based multi-agent systems, Model Privacy, ModelOps, Privacy-preserving AI, Risk management, TRiSM, Trustworthy AI},
pubstate = {published},
tppubtype = {article}
}
