Changes
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..5183e46
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,25 @@
+Language: Cpp
+BasedOnStyle: LLVM
+IndentWidth: 4
+TabWidth: 4
+UseTab: Never
+ColumnLimit: 80
+BreakBeforeBraces: Allman
+AllowShortFunctionsOnASingleLine: Empty
+AllowShortBlocksOnASingleLine: Empty
+IndentCaseLabels: false
+SpaceBeforeParens: Never
+PointerAlignment: Left
+DerivePointerAlignment: false
+Standard: Latest
+IncludeCategories:
+ - Regex: '^<[a-z0-9_]+>$'
+ Priority: 1
+ - Regex: '^<.*>'
+ Priority: 2
+ - Regex: '^".*"'
+ Priority: 3
+IncludeBlocks: Regroup
+SortIncludes: true
+InsertBraces: true
+AccessModifierOffset: -4
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..a4fb4fb
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+build/
+.cache/
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..34b9f5b
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,118 @@
+cmake_minimum_required(VERSION 3.24)
+project(AgentSmith)
+
+set(CMAKE_EXPORT_COMPILE_COMMANDS TRUE)
+set(CMAKE_CXX_STANDARD 23)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS FALSE)
+
+include(FetchContent)
+
+FetchContent_Declare(
+ libmw
+ GIT_REPOSITORY https://github.com/MetroWind/libmw.git
+)
+
+FetchContent_Declare(
+ spdlog
+ GIT_REPOSITORY https://github.com/gabime/spdlog.git
+ GIT_TAG v1.12.0
+)
+
+FetchContent_Declare(
+ json
+ GIT_REPOSITORY https://github.com/nlohmann/json.git
+ GIT_TAG v3.11.3
+)
+
+FetchContent_Declare(
+ cxxopts
+ GIT_REPOSITORY https://github.com/jarro2783/cxxopts.git
+ GIT_TAG v3.1.1
+)
+
+set(SPDLOG_USE_STD_FORMAT ON)
+set(LIBMW_BUILD_URL ON)
+set(LIBMW_BUILD_SQLITE ON)
+set(LIBMW_BUILD_HTTP_SERVER OFF)
+set(LIBMW_BUILD_CRYPTO OFF)
+
+FetchContent_MakeAvailable(libmw spdlog json cxxopts)
+
+set(SOURCE_FILES
+ src/main.cpp
+ src/agent.cpp
+ src/llm_client.cpp
+ src/memory.cpp
+ src/tool.cpp
+ src/agent_smith_types.cpp
+)
+
+set(LIBS
+ mw::mw
+ mw::url
+ mw::sqlite
+ spdlog::spdlog
+ nlohmann_json::nlohmann_json
+ cxxopts::cxxopts
+)
+
+set(INCLUDES
+ ${libmw_SOURCE_DIR}/includes
+ ${json_SOURCE_DIR}/single_include
+ ${cxxopts_SOURCE_DIR}/include
+)
+
+add_executable(agent_smith ${SOURCE_FILES})
+
+set_property(TARGET agent_smith PROPERTY COMPILE_WARNING_AS_ERROR FALSE)
+target_compile_options(agent_smith PRIVATE -Wall -Wextra)
+target_include_directories(agent_smith PRIVATE ${INCLUDES} src)
+target_link_libraries(agent_smith PRIVATE ${LIBS})
+
+option(AGENT_SMITH_BUILD_TESTS "Build unit tests" ON)
+
+if(AGENT_SMITH_BUILD_TESTS)
+ FetchContent_Declare(
+ googletest
+ URL https://github.com/google/googletest/archive/refs/tags/v1.14.0.tar.gz
+ )
+ FetchContent_MakeAvailable(googletest)
+
+ set(TEST_FILES
+ src/task_test.cpp
+ src/memory_test.cpp
+ src/tool_test.cpp
+ src/agent_test.cpp
+ src/llm_client_test.cpp
+ src/agent_smith_types_test.cpp
+ )
+
+ add_executable(agent_smith_test
+ ${TEST_FILES}
+ src/agent.cpp
+ src/memory.cpp
+ src/tool.cpp
+ src/llm_client.cpp
+ src/agent_smith_types.cpp
+ )
+
+ set_property(TARGET agent_smith_test PROPERTY CXX_STANDARD 23)
+ target_compile_options(agent_smith_test PRIVATE -Wall -Wextra)
+ target_include_directories(agent_smith_test PRIVATE
+ ${INCLUDES}
+ src
+ ${googletest_SOURCE_DIR}/googletest/include
+ ${googletest_SOURCE_DIR}/googlemock/include
+ )
+
+ target_link_libraries(agent_smith_test PRIVATE
+ ${LIBS}
+ GTest::gtest_main
+ GTest::gmock_main
+ )
+
+ enable_testing()
+ include(GoogleTest)
+ gtest_discover_tests(agent_smith_test)
+endif()
diff --git a/design.md b/design.md
new file mode 100644
index 0000000..61a57ec
--- /dev/null
+++ b/design.md
@@ -0,0 +1,311 @@
+# Technical Design Document: Agent Smith Framework
+
+## 1. Introduction
+This document outlines the technical design for **Agent Smith**, a modular C++23 framework for building LLM-powered agents. It is designed to be highly efficient and leverages `libmw` for everyday system tasks, networking, and data handling.
+
+This guide is structured to help a new graduate software engineer understand the architecture and implement the system systematically.
+
+---
+
+## 2. High-Level Architecture
+The framework relies on a decoupled architecture centered around the `Agent` class.
+
+* **`Agent`**: The orchestrator. It manages the conversational loop, maintains state via `Memory`, interacts with the `LlmClient`, and dynamically executes `Tools` and `Skills`.
+* **`LlmClient`**: Handles network communication with LLM APIs (OpenAI and Gemini) using `mw::url`.
+* **`Memory`**: Stores the conversation history. Can be in-memory or backed by `mw::sqlite` for persistence.
+* **`Tool`**: An interface representing a callable action. Includes native tools and integrations via the Model Context Protocol (MCP).
+* **`Skill`**: A configuration that bundles a specific system prompt and a subset of tools to focus the agent on a specific workflow.
+* **CLI**: A command-line interface implemented using `cxxopts` to allow users to configure the agent's connection parameters (API key and endpoint) at runtime.
+
+---
+
+## 3. Data Models
+
+We use `nlohmann/json` (provided via `libmw`) for all dynamic data structures and serialization.
+
+```cpp
+#include <string>
+#include <optional>
+#include <vector>
+#include <variant>
+#include <nlohmann/json.hpp>
+
+// 1. System Message: Defines the agent's persona and overarching rules.
+struct SystemMessage
+{
+ std::string content;
+};
+
+// 2. User Message: Input provided by the human user.
+struct UserMessage
+{
+ std::string content;
+};
+
+// Represents a single tool call requested by the Assistant
+struct ToolCall
+{
+ std::string id;
+ std::string name;
+ nlohmann::json arguments;
+};
+
+// 3. Assistant Message: The LLM's response. Can contain text or tool calls.
+struct AssistantMessage
+{
+ std::optional<std::string> content;
+ std::vector<ToolCall> tool_calls;
+};
+
+// 4. Tool Result Message: The result of executing a tool, sent back to LLM.
+struct ToolResultMessage
+{
+ std::string tool_call_id;
+ std::string content;
+};
+
+// The unified Message type using std::variant for compile-time type safety.
+using Message = std::variant<
+ SystemMessage,
+ UserMessage,
+ AssistantMessage,
+ ToolResultMessage>;
+
+// Standalone serialization functions for each message type
+nlohmann::json toJson(const SystemMessage& msg);
+nlohmann::json toJson(const UserMessage& msg);
+nlohmann::json toJson(const ToolCall& msg);
+nlohmann::json toJson(const AssistantMessage& msg);
+nlohmann::json toJson(const ToolResultMessage& msg);
+nlohmann::json toJson(const Message& msg);
+```
+
+---
+
+## 4. Component Design
+
+### 4.1 LlmClient Interface
+The `LlmClient` abstracts the underlying LLM provider. This component uses `mw::E<>` for explicit error handling and C++23 coroutines for asynchronous task management.
+
+```cpp
+#include <mw/error.hpp>
+
+// A placeholder for a C++23 coroutine Task type.
+template<typename T> class Task;
+
+class LlmClient
+{
+public:
+ virtual ~LlmClient() = default;
+
+ // Submits the history and tools, returning the Assistant's response or an error
+ virtual Task<mw::E<Message>> generateResponse(
+ const std::vector<Message>& history,
+ const nlohmann::json& available_tools_schema
+ ) = 0;
+};
+```
+* **Implementation Steps:**
+ * Implement `OpenAiClient`. Allow dependency injection of `mw::HTTPSessionInterface` (falling back to `mw::HTTPSession`) to enable rigorous mocked network testing.
+ * Use `mw::URL` to safely parse custom endpoints and append standard paths (like `chat/completions`).
+ * Deserialize the response body into the `Message` struct, utilizing `std::unexpected(mw::runtimeError(...))` on JSON parsing failures.
+
+### 4.2 Tool and MCP System
+A `Tool` must expose its JSON Schema so the LLM understands its parameters, and an execution method.
+
+```cpp
+class Tool
+{
+public:
+ virtual ~Tool() = default;
+ virtual std::string name() const = 0;
+ virtual std::string description() const = 0;
+
+ // Returns the JSON Schema describing the arguments the tool accepts
+ virtual nlohmann::json parametersSchema() const = 0;
+
+ // Executes the tool with the JSON arguments provided by the LLM
+ virtual Task<mw::E<std::string>> execute(const nlohmann::json& arguments) = 0;
+};
+```
+
+**Tool Registry:**
+To support multiple agents and centralized tool management, tools are owned by a global or application-level `ToolRegistry`. Methods returning `mw::E<void>` signal success or report duplication errors.
+
+```cpp
+#include <unordered_map>
+#include <memory>
+#include <stdexcept>
+#include <format>
+
+class ToolRegistry
+{
+public:
+ // Registers a tool. Throws an exception if the tool name already exists
+ // to prevent silent collisions.
+ void registerTool(std::unique_ptr<Tool> tool)
+ {
+ if(tools_.contains(tool->name()))
+ {
+ throw std::runtime_error(
+ std::format("Tool '{}' is already registered.", tool->name()));
+ }
+ tools_[tool->name()] = std::move(tool);
+ }
+
+ // Registers a tool with a namespace prefix to avoid collisions
+ // (e.g., from different MCP servers).
+ void registerToolWithNamespace(
+ const std::string& ns,
+ std::unique_ptr<Tool> tool)
+ {
+ // Implementation detail: The Tool interface or a wrapper class
+ // would need to return the new name when name() is called.
+ std::string prefixed_name = std::format("{}_{}", ns, tool->name());
+ if(tools_.contains(prefixed_name))
+ {
+ throw std::runtime_error(
+ std::format("Tool '{}' is already registered.", prefixed_name));
+ }
+ // ... wrap tool and store with prefixed_name
+ }
+
+ Tool* getTool(const std::string& name) const
+ {
+ auto it = tools_.find(name);
+ return it != tools_.end() ? it->second.get() : nullptr;
+ }
+
+ std::vector<Tool*> getAllTools() const
+ {
+ std::vector<Tool*> result;
+ for(const auto& [name, tool] : tools_)
+ {
+ result.push_back(tool.get());
+ }
+ return result;
+ }
+
+private:
+ std::unordered_map<std::string, std::unique_ptr<Tool>> tools_;
+};
+```
+
+* **Handling Collisions & Namespacing:** LLMs require unique tool names. If an MCP server provides a tool named `search` and another server also provides `search`, the `ToolRegistry` will reject the second registration. To resolve this, external tools (like those from MCP) should be registered using `registerToolWithNamespace` (e.g., `github_search` vs `local_search`).
+* **MCP Integration:**
+ * Implement an `McpClient` that can parse MCP server manifests.
+ * The `McpClient` will act as a factory, dynamically generating `Tool` objects that map to remote MCP functions and registering them with the `ToolRegistry` using the server's name as a namespace.
+
+### 4.3 Agent Skills
+A `Skill` overrides the agent's default behavior, giving it a new persona and restricted capabilities.
+
+```cpp
+struct Skill
+{
+ std::string name;
+ std::string system_prompt;
+ // Names of the tools allowed for this skill
+ std::vector<std::string> allowed_tools;
+};
+```
+
+### 4.4 Memory Management
+Handles the context window.
+
+```cpp
+class Memory
+{
+public:
+ virtual ~Memory() = default;
+ virtual void addMessage(const Message& msg) = 0;
+ virtual std::vector<Message> getHistory() const = 0;
+ virtual void clear() = 0;
+};
+```
+* **Implementation Steps:**
+ * Create `InMemoryMemory` (a simple `std::vector` wrapper).
+ * Create `SqliteMemory` using `mw::sqlite` to persist `Message` structs to a database file.
+
+### 4.5 The Agent Core
+The `Agent` ties the components together in its main conversational loop.
+
+```cpp
+#include <memory>
+#include <vector>
+
+class Agent
+{
+public:
+ Agent(
+ std::unique_ptr<LlmClient> client,
+ std::unique_ptr<Memory> memory,
+ ToolRegistry& tool_registry);
+
+ // Grants the agent permission to use a specific tool from the registry
+ mw::E<void> allowTool(const std::string& tool_name);
+ void activateSkill(const Skill& skill);
+
+ // The main entry point for user interaction
+ Task<mw::E<std::string>> run(const std::string& user_input);
+
+private:
+ std::unique_ptr<LlmClient> client_;
+ std::unique_ptr<Memory> memory_;
+ ToolRegistry& tool_registry_;
+
+ // List of tool names this agent is currently allowed to use
+ std::vector<std::string> allowed_tools_;
+ std::optional<Skill> current_skill_;
+};
+```
+
+**The `run` Loop Logic (Pseudo-code):**
+1. Append `user_input` as a `UserMessage` to `memory_`.
+2. `loop`:
+ 1. Fetch `history` from `memory_`.
+ 2. Resolve `allowed_tools_` against the `tool_registry_` and convert them into a single JSON Schema array.
+ 3. `response_msg = co_await client_->generateResponse(history, tools_schema)`.
+ 4. Append `response_msg` (which is an `AssistantMessage`) to `memory_`.
+ 5. `if response_msg contains tool_calls`:
+ 1. For each `call` in `tool_calls`:
+ 1. Find matching `Tool* tool` in `tool_registry_`.
+ 2. `result = co_await tool->execute(call.arguments)`.
+ 3. Append a `ToolResultMessage` (containing `result` and `call.id`) to `memory_`.
+ 6. `else`:
+ 1. Break loop.
+3. Return `response_msg.content` (or extract content from the variant).
+
+---
+
+## 5. Build System & Dependencies
+* **CMake:** The project will be built using CMake (`CMakeLists.txt`).
+* **C++23:** Ensure compiler flags support standard C++23 (`-std=c++23`).
+* **libmw:** Include `libmw` via `FetchContent` or as a Git submodule.
+ * Link against `mw::mw`, `mw::url`, `mw::sqlite` (matching the targets used in `CMakeLists.txt`).
+* **Logging:** Use `spdlog` (fetched via `FetchContent`) for detailed debug and execution logging.
+
+---
+
+## 6. Implementation Plan (For the Developer)
+
+To build this systematically, follow these phases:
+
+1. **Phase 1: Foundations & Build Setup**
+ * Create the `CMakeLists.txt`. Integrate `libmw`.
+ * Define the core structs (`Message` and its alternatives, `Skill`).
+2. **Phase 2: Networking & LLM Client**
+ * Implement the `LlmClient` interface.
+ * Build `OpenAiClient` using `mw::url`. Test it with a hardcoded prompt to ensure you can parse the JSON responses.
+3. **Phase 3: The Core Loop**
+ * Implement the `Agent` class and a simple `InMemoryMemory` class.
+ * Get a basic back-and-forth chat working in the terminal *without* tools.
+4. **Phase 4: Tool Calling**
+ * Implement the `Tool` interface.
+ * Create a simple mock tool (e.g., `CalculatorTool`).
+ * Implement the `Agent::run` loop logic to parse tool calls, execute them, and feed the results back to the LLM.
+5. **Phase 5: Persistence & Skills**
+ * Implement `SqliteMemory` utilizing `mw::sqlite`.
+ * Add the ability to load and switch between `Skill` profiles.
+6. **Phase 6: Async Polish & MCP**
+ * Finalize C++23 coroutine integration for all network and execution paths.
+ * Research and implement an `McpClient` to communicate with external MCP servers over stdio.
\ No newline at end of file
diff --git a/prd.md b/prd.md
new file mode 100644
index 0000000..ee20ea2
--- /dev/null
+++ b/prd.md
@@ -0,0 +1,77 @@
+# PRD: Agent Smith - C++ LLM Agent Framework
+
+## 1. Overview
+Agent Smith is a high-performance, modular C++ framework for building LLM-powered agents. It aims to provide a clean abstraction for interacting with LLM providers, managing agent state, and executing tools. The framework strongly leans on `libmw` for everyday system tasks, networking, and data handling.
+
+## 2. Core Components
+
+### 2.1 LLM Provider Interface (`LlmClient`)
+* **Abstraction:** Unified interface focused on two primary backends:
+ * OpenAI-compatible endpoints (OpenAI, Local models running OpenAI-compatible servers like vLLM/llama.cpp server).
+ * Google Gemini API.
+* **Capabilities:**
+ * Support for standard request/response cycles via `libmw`'s HTTP client (`mw::url` wrapper around cURL).
+ * Asynchronous streaming of tokens.
+ * Configurable parameters (temperature, max tokens, etc.).
+ * System prompt management.
+
+### 2.2 Agent Core (`Agent`)
+* **Logic:** Manages the conversational loop and decision-making process.
+* **Orchestration:** Coordinates between the `LlmClient`, `Memory`, and `Tool` systems.
+* **State Management:** Maintains the current status and internal reasoning of the agent.
+
+### 2.3 Tool/Action System (`Tool` & MCP)
+* **Definition:** Interface for defining functions that agents can invoke. The framework will natively support standard **tool calls**.
+* **Model Context Protocol (MCP):** Full support for MCP to allow standardized communication, capability discovery, and seamless interaction with external servers, tools, and data sources.
+* **Discovery:** Mechanism to provide tool schemas (JSON Schema) to the LLM.
+* **Execution:** Automated parsing of LLM-generated arguments and execution of C++ functions.
+* **Feedback:** Structured way to return tool results back to the LLM context.
+
+### 2.4 Agent Skills
+* **Modular Capabilities:** Support for defining, loading, and activating specialized "skills".
+* **Extension:** Skills will bundle specialized system prompts, contextual knowledge, and restricted toolsets to guide the agent through specific, complex workflows or domain-specific tasks.
+
+### 2.5 Memory & Context Management (`Memory`)
+* **Storage:** Methods for storing and retrieving conversation history. Local persistence can be implemented using `mw::sqlite`.
+* **Strategies:**
+ * Rolling window (fixed token/message limit).
+ * Summary-based memory.
+
+## 3. Functional Requirements
+
+### 3.1 Asynchronous Operations
+* If asynchronous execution is necessary (e.g., for non-blocking LLM calls or tool executions), the framework will utilize **C++ standard coroutines**.
+
+### 3.2 JSON Integration
+* Robust serialization and deserialization for API communication and tool argument parsing using **`nlohmann/json`** (leveraging `libmw`'s integration).
+
+### 3.3 Streaming Support
+* Real-time processing of token streams for both UI feedback and intermediate agent reasoning.
+
+### 3.4 Resilience, Error Handling & Logging
+* **Error Propagation:** Utilize `mw::E<T>` (`std::expected` wrapper) extensively for safe, exception-free error propagation across public interfaces, especially for network and JSON parsing tasks.
+* **Built-in retry logic:** for network failures and rate limiting.
+* **Validation:** Validation of LLM outputs to handle malformed or hallucinated tool calls, returning explicit error payloads back to the conversational loop.
+* **Logging:** Comprehensive logging handled via `libmw`'s integration with **`spdlog`**.
+
+### 3.5 Command-Line Interface
+* **CLI Options:** The executable must support command-line arguments to specify:
+ * API Key (`--api-key` or `-k`).
+ * LLM API Endpoint (`--endpoint` or `-e`) to support OpenAI-compatible local servers or custom proxies. The endpoint should just be the base URL (e.g. `https://api.openai.com/v1`); paths are appended automatically via `mw::URL`.
+ * Help information (`--help` or `-h`).
+
+### 3.6 Testing & Validation
+* **Unit Testing:** The project must contain comprehensive unit test coverage for all public interfaces (Tasks, Memory, Tools, Agent logic, and LLM Clients).
+* **Frameworks:** Rely on GoogleTest (`gtest`) and GoogleMock (`gmock`).
+* **Mocking:** Network operations in `LlmClient` tests should be bypassed using injected `mw::HTTPSessionMock` interfaces.
+
+## 4. Technical Constraints
+* **Language Standard:** C++23.
+* **Build System:** CMake.
+* **Dependencies:** Heavy reliance on `libmw` for core utilities, networking (`mw::url`), database (`mw::sqlite`), and JSON processing (`nlohmann/json`).
+* **Portability:** Target Linux/Unix systems primarily.
+* All URL manipulations should be done with the `mw::URL` class.
+* All functions that can fail should return a `mw::E<>`.
+
+## 5. Future Works
+* **Vector Database Integration (RAG):** Implement Retrieval-Augmented Generation to allow agents to perform semantic search across large external knowledge bases.
diff --git a/src/agent.cpp b/src/agent.cpp
new file mode 100644
index 0000000..54060db
--- /dev/null
+++ b/src/agent.cpp
@@ -0,0 +1,98 @@
+#include "agent.hpp"
+
+#include <spdlog/spdlog.h>
+
+Agent::Agent(std::unique_ptr<LlmClient> client, std::unique_ptr<Memory> memory,
+ ToolRegistry& tool_registry)
+ : client_(std::move(client)), memory_(std::move(memory)),
+ tool_registry_(tool_registry)
+{
+}
+
+mw::E<void> Agent::allowTool(const std::string& tool_name)
+{
+ if(tool_registry_.getTool(tool_name) == nullptr)
+ {
+ return std::unexpected(mw::runtimeError("Tool not found in registry"));
+ }
+ allowed_tools_.push_back(tool_name);
+ return {};
+}
+
+void Agent::activateSkill(const Skill& skill)
+{
+ current_skill_ = skill;
+ allowed_tools_ = skill.allowed_tools;
+ memory_->addMessage(SystemMessage{skill.system_prompt});
+}
+
+Task<mw::E<std::string>> Agent::run(const std::string& user_input)
+{
+ memory_->addMessage(UserMessage{user_input});
+
+ while(true)
+ {
+ auto history = memory_->getHistory();
+ nlohmann::json tools_schema = nlohmann::json::array();
+
+ for(const auto& tool_name : allowed_tools_)
+ {
+ if(Tool* tool = tool_registry_.getTool(tool_name))
+ {
+ nlohmann::json tool_json = {
+ {"type", "function"},
+ {"function",
+ {{"name", tool->name()},
+ {"description", tool->description()},
+ {"parameters", tool->parametersSchema()}}}};
+ tools_schema.push_back(tool_json);
+ }
+ }
+
+ auto response_res =
+ co_await client_->generateResponse(history, tools_schema);
+ if(!response_res.has_value())
+ {
+ co_return std::unexpected(response_res.error());
+ }
+
+ Message response_msg = response_res.value();
+ memory_->addMessage(response_msg);
+
+ auto& assistant_msg = std::get<AssistantMessage>(response_msg);
+
+ if(!assistant_msg.tool_calls.empty())
+ {
+ for(const auto& call : assistant_msg.tool_calls)
+ {
+ if(Tool* tool = tool_registry_.getTool(call.name))
+ {
+ auto result = co_await tool->execute(call.arguments);
+ if(result.has_value())
+ {
+ memory_->addMessage(
+ ToolResultMessage{call.id, result.value()});
+ }
+ else
+ {
+ memory_->addMessage(ToolResultMessage{
+ call.id, "Error: " + mw::errorMsg(result.error())});
+ }
+ }
+ else
+ {
+ memory_->addMessage(
+ ToolResultMessage{call.id, "Error: Tool not found"});
+ }
+ }
+ }
+ else
+ {
+ if(assistant_msg.content)
+ {
+ co_return *assistant_msg.content;
+ }
+ co_return "";
+ }
+ }
+}
diff --git a/src/agent.hpp b/src/agent.hpp
new file mode 100644
index 0000000..e31c592
--- /dev/null
+++ b/src/agent.hpp
@@ -0,0 +1,34 @@
+#pragma once
+
+#include <memory>
+#include <optional>
+#include <string>
+#include <vector>
+
+#include <mw/error.hpp>
+
+#include "agent_smith_types.hpp"
+#include "llm_client.hpp"
+#include "memory.hpp"
+#include "task.hpp"
+#include "tool.hpp"
+
+class Agent
+{
+public:
+ Agent(std::unique_ptr<LlmClient> client, std::unique_ptr<Memory> memory,
+ ToolRegistry& tool_registry);
+
+ mw::E<void> allowTool(const std::string& tool_name);
+ void activateSkill(const Skill& skill);
+
+ Task<mw::E<std::string>> run(const std::string& user_input);
+
+private:
+ std::unique_ptr<LlmClient> client_;
+ std::unique_ptr<Memory> memory_;
+ ToolRegistry& tool_registry_;
+
+ std::vector<std::string> allowed_tools_;
+ std::optional<Skill> current_skill_;
+};
diff --git a/src/agent_smith_types.cpp b/src/agent_smith_types.cpp
new file mode 100644
index 0000000..e759560
--- /dev/null
+++ b/src/agent_smith_types.cpp
@@ -0,0 +1,50 @@
+#include "agent_smith_types.hpp"
+
+nlohmann::json toJson(const SystemMessage& msg)
+{
+ return {{"role", "system"}, {"content", msg.content}};
+}
+
+nlohmann::json toJson(const UserMessage& msg)
+{
+ return {{"role", "user"}, {"content", msg.content}};
+}
+
+nlohmann::json toJson(const ToolCall& msg)
+{
+ return {{"id", msg.id},
+ {"type", "function"},
+ {"function",
+ {{"name", msg.name}, {"arguments", msg.arguments.dump()}}}};
+}
+
+nlohmann::json toJson(const AssistantMessage& msg)
+{
+ nlohmann::json j = {{"role", "assistant"}};
+ if(msg.content)
+ {
+ j["content"] = *msg.content;
+ }
+ if(!msg.tool_calls.empty())
+ {
+ nlohmann::json calls_json = nlohmann::json::array();
+ for(const auto& call : msg.tool_calls)
+ {
+ calls_json.push_back(toJson(call));
+ }
+ j["tool_calls"] = calls_json;
+ }
+ return j;
+}
+
+nlohmann::json toJson(const ToolResultMessage& msg)
+{
+ return {{"role", "tool"},
+ {"tool_call_id", msg.tool_call_id},
+ {"content", msg.content}};
+}
+
+nlohmann::json toJson(const Message& msg)
+{
+ return std::visit([](const auto& m) { return toJson(m); }, msg);
+}
diff --git a/src/agent_smith_types.hpp b/src/agent_smith_types.hpp
new file mode 100644
index 0000000..61b1f0a
--- /dev/null
+++ b/src/agent_smith_types.hpp
@@ -0,0 +1,54 @@
+#pragma once
+
+#include <optional>
+#include <string>
+#include <variant>
+#include <vector>
+
+#include <nlohmann/json.hpp>
+
+struct SystemMessage
+{
+ std::string content;
+};
+
+struct UserMessage
+{
+ std::string content;
+};
+
+struct ToolCall
+{
+ std::string id;
+ std::string name;
+ nlohmann::json arguments;
+};
+
+struct AssistantMessage
+{
+ std::optional<std::string> content;
+ std::vector<ToolCall> tool_calls;
+};
+
+struct ToolResultMessage
+{
+ std::string tool_call_id;
+ std::string content;
+};
+
+using Message = std::variant<SystemMessage, UserMessage, AssistantMessage,
+ ToolResultMessage>;
+
+nlohmann::json toJson(const SystemMessage& msg);
+nlohmann::json toJson(const UserMessage& msg);
+nlohmann::json toJson(const ToolCall& msg);
+nlohmann::json toJson(const AssistantMessage& msg);
+nlohmann::json toJson(const ToolResultMessage& msg);
+nlohmann::json toJson(const Message& msg);
+
+struct Skill
+{
+ std::string name;
+ std::string system_prompt;
+ std::vector<std::string> allowed_tools;
+};
diff --git a/src/agent_smith_types_test.cpp b/src/agent_smith_types_test.cpp
new file mode 100644
index 0000000..dbfd2d4
--- /dev/null
+++ b/src/agent_smith_types_test.cpp
@@ -0,0 +1,67 @@
+#include <gtest/gtest.h>
+
+#include "agent_smith_types.hpp"
+
+TEST(AgentSmithTypesTest, SystemMessageToJson)
+{
+ SystemMessage msg{"You are a helpful assistant."};
+ nlohmann::json j = toJson(msg);
+ EXPECT_EQ(j["role"], "system");
+ EXPECT_EQ(j["content"], "You are a helpful assistant.");
+}
+
+TEST(AgentSmithTypesTest, UserMessageToJson)
+{
+ UserMessage msg{"What is 2+2?"};
+ nlohmann::json j = toJson(msg);
+ EXPECT_EQ(j["role"], "user");
+ EXPECT_EQ(j["content"], "What is 2+2?");
+}
+
+TEST(AgentSmithTypesTest, ToolCallToJson)
+{
+ ToolCall call{"call_1", "calc", {{"a", 2}, {"b", 2}}};
+ nlohmann::json j = toJson(call);
+ EXPECT_EQ(j["id"], "call_1");
+ EXPECT_EQ(j["type"], "function");
+ EXPECT_EQ(j["function"]["name"], "calc");
+ EXPECT_EQ(j["function"]["arguments"], "{\"a\":2,\"b\":2}");
+}
+
+TEST(AgentSmithTypesTest, AssistantMessageToJson_ContentOnly)
+{
+ AssistantMessage msg{"The answer is 4.", {}};
+ nlohmann::json j = toJson(msg);
+ EXPECT_EQ(j["role"], "assistant");
+ EXPECT_EQ(j["content"], "The answer is 4.");
+ EXPECT_FALSE(j.contains("tool_calls"));
+}
+
+TEST(AgentSmithTypesTest, AssistantMessageToJson_ToolCallsOnly)
+{
+ AssistantMessage msg{std::nullopt,
+ {{"call_1", "calc", {{"a", 2}, {"b", 2}}}}};
+ nlohmann::json j = toJson(msg);
+ EXPECT_EQ(j["role"], "assistant");
+ EXPECT_FALSE(j.contains("content"));
+ ASSERT_TRUE(j.contains("tool_calls"));
+ EXPECT_EQ(j["tool_calls"].size(), 1);
+ EXPECT_EQ(j["tool_calls"][0]["id"], "call_1");
+}
+
+TEST(AgentSmithTypesTest, ToolResultMessageToJson)
+{
+ ToolResultMessage msg{"call_1", "4"};
+ nlohmann::json j = toJson(msg);
+ EXPECT_EQ(j["role"], "tool");
+ EXPECT_EQ(j["tool_call_id"], "call_1");
+ EXPECT_EQ(j["content"], "4");
+}
+
+TEST(AgentSmithTypesTest, MessageVariantToJson)
+{
+ Message msg = UserMessage{"Hello"};
+ nlohmann::json j = toJson(msg);
+ EXPECT_EQ(j["role"], "user");
+ EXPECT_EQ(j["content"], "Hello");
+}
diff --git a/src/agent_test.cpp b/src/agent_test.cpp
new file mode 100644
index 0000000..02de426
--- /dev/null
+++ b/src/agent_test.cpp
@@ -0,0 +1,103 @@
+#include <gtest/gtest.h>
+
+#include "agent.hpp"
+#include "llm_client.hpp"
+#include "memory.hpp"
+#include "tool.hpp"
+
+class MockLlmClient : public LlmClient
+{
+public:
+ std::vector<Message> responses;
+ size_t call_count = 0;
+
+ Task<mw::E<Message>> generateResponse(const std::vector<Message>&,
+ const nlohmann::json&) override
+ {
+ if(call_count < responses.size())
+ {
+ auto res = responses[call_count++];
+ co_return res;
+ }
+ co_return AssistantMessage{"default_mock_end", {}};
+ }
+};
+
+class DummyTool : public Tool
+{
+public:
+ std::string name() const override
+ {
+ return "dummy";
+ }
+ std::string description() const override
+ {
+ return "dummy desc";
+ }
+ nlohmann::json parametersSchema() const override
+ {
+ return nlohmann::json::object();
+ }
+ Task<mw::E<std::string>> execute(const nlohmann::json&) override
+ {
+ co_return "dummy_result";
+ }
+};
+
+TEST(AgentTest, BasicRunAndReturn)
+{
+ auto client = std::make_unique<MockLlmClient>();
+ client->responses.push_back(AssistantMessage{"Hello from Agent", {}});
+
+ auto memory = std::make_unique<InMemoryMemory>();
+ ToolRegistry registry;
+
+ Agent agent(std::move(client), std::move(memory), registry);
+
+ auto result = agent.run("Hi").get();
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(result.value(), "Hello from Agent");
+}
+
+TEST(AgentTest, ToolCallingFlow)
+{
+ auto client = std::make_unique<MockLlmClient>();
+
+ AssistantMessage tool_call_msg;
+ tool_call_msg.tool_calls.push_back(
+ {"call_1", "dummy", nlohmann::json::object()});
+
+ client->responses.push_back(tool_call_msg);
+ client->responses.push_back(AssistantMessage{"Final Result", {}});
+
+ auto memory = std::make_unique<InMemoryMemory>();
+ ToolRegistry registry;
+ registry.registerTool(std::make_unique<DummyTool>());
+
+ Agent agent(std::move(client), std::move(memory), registry);
+ agent.allowTool("dummy");
+
+ auto result = agent.run("Hi").get();
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(result.value(), "Final Result");
+}
+
+TEST(AgentTest, ActivateSkill)
+{
+ auto client = std::make_unique<MockLlmClient>();
+ client->responses.push_back(AssistantMessage{"Skill Active", {}});
+
+ auto memory = std::make_unique<InMemoryMemory>();
+ auto* mem_ptr = memory.get();
+ ToolRegistry registry;
+
+ Agent agent(std::move(client), std::move(memory), registry);
+
+ Skill test_skill{"test_skill", "You are a tester", {}};
+ agent.activateSkill(test_skill);
+
+ auto history = mem_ptr->getHistory();
+ ASSERT_EQ(history.size(), 1);
+ EXPECT_TRUE(std::holds_alternative<SystemMessage>(history[0]));
+ EXPECT_EQ(std::get<SystemMessage>(history[0]).content, "You are a tester");
+}
diff --git a/src/calculator_tool.hpp b/src/calculator_tool.hpp
new file mode 100644
index 0000000..37921ed
--- /dev/null
+++ b/src/calculator_tool.hpp
@@ -0,0 +1,81 @@
+#pragma once
+
+#include <string>
+
+#include "tool.hpp"
+
+class CalculatorTool : public Tool
+{
+public:
+ std::string name() const override
+ {
+ return "calculator";
+ }
+
+ std::string description() const override
+ {
+ return "Performs basic arithmetic operations.";
+ }
+
+ nlohmann::json parametersSchema() const override
+ {
+ return R"({
+ "type": "object",
+ "properties": {
+ "operation": {
+ "type": "string",
+ "enum": ["add", "subtract", "multiply", "divide"],
+ "description": "The operation to perform"
+ },
+ "a": {
+ "type": "number"
+ },
+ "b": {
+ "type": "number"
+ }
+ },
+ "required": ["operation", "a", "b"]
+ })"_json;
+ }
+
+ Task<mw::E<std::string>> execute(const nlohmann::json& arguments) override
+ {
+ if(!arguments.contains("operation") || !arguments.contains("a") ||
+ !arguments.contains("b"))
+ {
+ co_return std::unexpected(mw::runtimeError("Missing arguments"));
+ }
+
+ std::string op = arguments["operation"];
+ double a = arguments["a"];
+ double b = arguments["b"];
+ double result = 0.0;
+
+ if(op == "add")
+ {
+ result = a + b;
+ }
+ else if(op == "subtract")
+ {
+ result = a - b;
+ }
+ else if(op == "multiply")
+ {
+ result = a * b;
+ }
+ else if(op == "divide")
+ {
+ if(b == 0)
+ {
+ co_return std::unexpected(mw::runtimeError("Division by zero"));
+ }
+ result = a / b;
+ }
+ else
+ {
+ co_return std::unexpected(mw::runtimeError("Unknown operation"));
+ }
+
+ co_return std::to_string(result);
+ }
+};
diff --git a/src/llm_client.cpp b/src/llm_client.cpp
new file mode 100644
index 0000000..16a0c79
--- /dev/null
+++ b/src/llm_client.cpp
@@ -0,0 +1,104 @@
+#include "llm_client.hpp"
+
+#include <mw/http_client.hpp>
+#include <mw/url.hpp>
+#include <spdlog/spdlog.h>
+
+// Construct a client for an OpenAI-compatible HTTP API. When no session
+// is injected (the production path), a real mw::HTTPSession is created;
+// tests inject a mock session instead.
+OpenAiClient::OpenAiClient(std::string api_key, std::string model,
+                           std::string endpoint,
+                           std::unique_ptr<mw::HTTPSessionInterface> session)
+    : api_key_(std::move(api_key)), model_(std::move(model)),
+      endpoint_(std::move(endpoint))
+{
+    if(session)
+    {
+        session_ = std::move(session);
+    }
+    else
+    {
+        session_ = std::make_unique<mw::HTTPSession>();
+    }
+}
+
+// Parse an OpenAI chat-completions response body into an AssistantMessage.
+// Returns an error when the body is not valid JSON or lacks the expected
+// "choices[0].message" structure.
+mw::E<AssistantMessage>
+OpenAiClient::parseResponse(std::string_view response_body) const
+{
+    try
+    {
+        auto response_json = nlohmann::json::parse(response_body);
+        if(!response_json.contains("choices") || response_json["choices"].empty())
+        {
+            return std::unexpected(mw::runtimeError(
+                std::string("Invalid response format: missing choices. Body: ") + std::string(response_body)));
+        }
+        // Fix: a missing "message" key was previously materialized as null
+        // by non-const operator[], silently producing an empty
+        // AssistantMessage instead of an error. Validate it explicitly.
+        const auto& first_choice = response_json["choices"][0];
+        if(!first_choice.contains("message"))
+        {
+            return std::unexpected(mw::runtimeError(
+                std::string("Invalid response format: missing message. Body: ") + std::string(response_body)));
+        }
+        const auto& choice = first_choice["message"];
+
+        AssistantMessage assistant_msg;
+        if(choice.contains("content") && !choice["content"].is_null())
+        {
+            assistant_msg.content = static_cast<std::string>(choice["content"]);
+        }
+
+        if(choice.contains("tool_calls"))
+        {
+            for(const auto& tc : choice["tool_calls"])
+            {
+                ToolCall call;
+                call.id = static_cast<std::string>(tc["id"]);
+                call.name = static_cast<std::string>(tc["function"]["name"]);
+                // The API nests the arguments as a JSON-encoded string;
+                // decode it into a real JSON object here.
+                call.arguments = nlohmann::json::parse(
+                    static_cast<std::string>(tc["function"]["arguments"]));
+                assistant_msg.tool_calls.push_back(call);
+            }
+        }
+
+        return assistant_msg;
+    }
+    catch(const std::exception& e)
+    {
+        // nlohmann throws for malformed JSON and type mismatches; map all
+        // of them onto the expected-error channel.
+        return std::unexpected(
+            mw::runtimeError(std::string("JSON parse error: ") + e.what()));
+    }
+}
+
+// Send the conversation (and optional tool schemas) to the
+// chat-completions endpoint and return the model's reply.
+Task<mw::E<Message>>
+OpenAiClient::generateResponse(const std::vector<Message>& history,
+                               const nlohmann::json& available_tools_schema)
+{
+    // Serialize the conversation history first.
+    nlohmann::json serialized_history = nlohmann::json::array();
+    for(const auto& entry : history)
+    {
+        serialized_history.push_back(toJson(entry));
+    }
+
+    nlohmann::json payload;
+    payload["model"] = model_;
+    payload["messages"] = serialized_history;
+    // Only attach tools when the caller actually supplies an array of them.
+    if(available_tools_schema.is_array() && !available_tools_schema.empty())
+    {
+        payload["tools"] = available_tools_schema;
+    }
+
+    auto base_url = mw::URL::fromStr(endpoint_);
+    if(!base_url.has_value())
+    {
+        co_return std::unexpected(mw::runtimeError("Invalid endpoint URL"));
+    }
+    mw::URL full_url = std::move(base_url.value());
+    full_url.appendPath("chat/completions");
+
+    mw::HTTPRequest request(full_url.str());
+    request.addHeader("Content-Type", "application/json");
+    request.addHeader("Authorization", "Bearer " + api_key_);
+    request.setPayload(payload.dump());
+
+    auto response = session_->post(request);
+    if(!response.has_value())
+    {
+        spdlog::error("OpenAI request failed: {}",
+                      mw::errorMsg(response.error()));
+        co_return std::unexpected(response.error());
+    }
+
+    auto parsed = parseResponse(response.value()->payloadAsStr());
+    if(!parsed.has_value())
+    {
+        co_return std::unexpected(parsed.error());
+    }
+    co_return parsed.value();
+}
diff --git a/src/llm_client.hpp b/src/llm_client.hpp
new file mode 100644
index 0000000..fe07751
--- /dev/null
+++ b/src/llm_client.hpp
@@ -0,0 +1,43 @@
+#pragma once
+
+#include <memory>
+#include <string_view>
+#include <vector>
+
+#include <mw/error.hpp>
+#include <mw/http_client.hpp>
+#include <nlohmann/json.hpp>
+
+#include "agent_smith_types.hpp"
+#include "task.hpp"
+
+// Abstract interface for a chat-completion LLM backend. Implementations
+// turn a message history (plus optional tool schemas) into the model's
+// next Message.
+class LlmClient
+{
+public:
+    virtual ~LlmClient() = default;
+
+    // Generate the next message for the given conversation history.
+    // available_tools_schema: JSON array of tool descriptions the model
+    // may call; pass an empty array to offer no tools.
+    virtual Task<mw::E<Message>>
+    generateResponse(const std::vector<Message>& history,
+                     const nlohmann::json& available_tools_schema) = 0;
+};
+
+// LlmClient backed by an OpenAI-compatible chat-completions HTTP API.
+class OpenAiClient : public LlmClient
+{
+public:
+    // endpoint is the API base URL; "chat/completions" is appended per
+    // request. A custom session may be injected for testing; nullptr
+    // selects a real mw::HTTPSession.
+    OpenAiClient(
+        std::string api_key, std::string model = "gpt-4o",
+        std::string endpoint = "https://api.openai.com/v1",
+        std::unique_ptr<mw::HTTPSessionInterface> session = nullptr);
+
+    Task<mw::E<Message>>
+    generateResponse(const std::vector<Message>& history,
+                     const nlohmann::json& available_tools_schema) override;
+
+private:
+    std::string api_key_;
+    std::string model_;
+    std::string endpoint_;
+    std::unique_ptr<mw::HTTPSessionInterface> session_;
+
+    // Parse a chat-completions response body into an AssistantMessage.
+    mw::E<AssistantMessage> parseResponse(std::string_view response_body) const;
+};
diff --git a/src/llm_client_test.cpp b/src/llm_client_test.cpp
new file mode 100644
index 0000000..22881f9
--- /dev/null
+++ b/src/llm_client_test.cpp
@@ -0,0 +1,102 @@
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <mw/http_client_mock.hpp>
+
+#include "llm_client.hpp"
+
+using ::testing::_;
+using ::testing::Return;
+
+// A plain-text reply must come back as an AssistantMessage with content
+// set and no tool calls.
+TEST(OpenAiClientTest, SuccessfulGeneration)
+{
+    auto session = std::make_unique<mw::HTTPSessionMock>();
+
+    const std::string body = R"({
+        "choices": [
+            {
+                "message": {
+                    "role": "assistant",
+                    "content": "Hello! How can I help?"
+                }
+            }
+        ]
+    })";
+    mw::HTTPResponse canned(200, body);
+    EXPECT_CALL(*session, post(_)).WillOnce(Return(&canned));
+
+    OpenAiClient client("fake_key", "gpt-4o", "https://fake.endpoint.com/v1",
+                        std::move(session));
+
+    std::vector<Message> history = {UserMessage{"Hi"}};
+    auto result =
+        client.generateResponse(history, nlohmann::json::array()).get();
+    ASSERT_TRUE(result.has_value());
+
+    const auto& reply = std::get<AssistantMessage>(result.value());
+    ASSERT_TRUE(reply.content.has_value());
+    EXPECT_EQ(*reply.content, "Hello! How can I help?");
+    EXPECT_TRUE(reply.tool_calls.empty());
+}
+
+// A tool_calls response with null content must decode into an
+// AssistantMessage with no content and one ToolCall whose JSON-string
+// arguments are parsed into a real JSON object.
+TEST(OpenAiClientTest, ToolCallGeneration)
+{
+    auto mock_session = std::make_unique<mw::HTTPSessionMock>();
+
+    std::string mock_json_response = R"({
+        "choices": [
+            {
+                "message": {
+                    "role": "assistant",
+                    "content": null,
+                    "tool_calls": [
+                        {
+                            "id": "call_123",
+                            "type": "function",
+                            "function": {
+                                "name": "calculator",
+                                "arguments": "{\"a\": 2, \"b\": 2, \"operation\": \"add\"}"
+                            }
+                        }
+                    ]
+                }
+            }
+        ]
+    })";
+
+    mw::HTTPResponse http_response(200, mock_json_response);
+
+    EXPECT_CALL(*mock_session, post(_)).WillOnce(Return(&http_response));
+
+    OpenAiClient client("fake_key", "gpt-4o", "https://fake.endpoint.com/v1", std::move(mock_session));
+
+    std::vector<Message> history = {UserMessage{"Calculate 2+2"}};
+    nlohmann::json tools = nlohmann::json::array();
+
+    auto result = client.generateResponse(history, tools).get();
+    ASSERT_TRUE(result.has_value());
+
+    auto& assistant_msg = std::get<AssistantMessage>(result.value());
+    EXPECT_FALSE(assistant_msg.content.has_value());
+    ASSERT_EQ(assistant_msg.tool_calls.size(), 1);
+    EXPECT_EQ(assistant_msg.tool_calls[0].name, "calculator");
+    EXPECT_EQ(assistant_msg.tool_calls[0].arguments["a"], 2);
+}
+
+// Transport-level failures must propagate the session's error unchanged.
+TEST(OpenAiClientTest, NetworkFailureReturnsError)
+{
+    auto mock_session = std::make_unique<mw::HTTPSessionMock>();
+
+    EXPECT_CALL(*mock_session, post(_))
+        .WillOnce(Return(std::unexpected(mw::runtimeError("Network Error"))));
+
+    OpenAiClient client("fake_key", "gpt-4o", "https://fake.endpoint.com/v1", std::move(mock_session));
+
+    std::vector<Message> history = {UserMessage{"Hi"}};
+    nlohmann::json tools = nlohmann::json::array();
+
+    auto result = client.generateResponse(history, tools).get();
+    EXPECT_FALSE(result.has_value());
+    EXPECT_EQ(mw::errorMsg(result.error()), "Network Error");
+}
diff --git a/src/main.cpp b/src/main.cpp
new file mode 100644
index 0000000..b51feba
--- /dev/null
+++ b/src/main.cpp
@@ -0,0 +1,123 @@
+#include <cstdlib>
+#include <iostream>
+#include <memory>
+#include <string>
+
+#include <cxxopts.hpp>
+
+#include "agent.hpp"
+#include "calculator_tool.hpp"
+#include "llm_client.hpp"
+#include "memory.hpp"
+
+// A simple coroutine runner since our Task is mostly synchronous right now
+// Drive a Task to completion and print either the agent's reply or the
+// error it produced. Our Task runs eagerly, so get() does not block here.
+void runTask(Task<mw::E<std::string>> task)
+{
+    const auto outcome = task.get();
+    if(!outcome.has_value())
+    {
+        std::cout << "Agent Error: " << mw::errorMsg(outcome.error()) << "\n";
+        return;
+    }
+    std::cout << "Agent: " << outcome.value() << "\n";
+}
+
+// Entry point: parse CLI options, wire up the client, memory and tools,
+// then run a simple read-eval-print loop until "exit" or EOF.
+int main(int argc, char** argv)
+{
+    cxxopts::Options options("agent_smith", "C++ LLM Agent Framework");
+
+    options.add_options()
+        ("k,api-key", "LLM API Key", cxxopts::value<std::string>())
+        ("e,endpoint", "LLM API Endpoint (Base URL)",
+         cxxopts::value<std::string>()->default_value(
+             "https://api.openai.com/v1"))
+        ("m,model", "LLM Model Name",
+         cxxopts::value<std::string>()->default_value("gpt-4o"))
+        ("h,help", "Print usage")
+        ;
+
+    auto result = options.parse(argc, argv);
+
+    if(result.count("help"))
+    {
+        std::cout << options.help() << "\n";
+        return 0;
+    }
+
+    // API key resolution: the explicit flag wins, then the environment.
+    std::string api_key;
+    if(result.count("api-key"))
+    {
+        api_key = result["api-key"].as<std::string>();
+    }
+    else if(const char* env_key = std::getenv("OPENAI_API_KEY"))
+    {
+        api_key = env_key;
+    }
+    else
+    {
+        std::cerr << "Error: API key must be provided via --api-key or "
+                  << "OPENAI_API_KEY environment variable.\n";
+        return 1;
+    }
+
+    std::string endpoint = result["endpoint"].as<std::string>();
+    std::string model = result["model"].as<std::string>();
+
+    auto client = std::make_unique<OpenAiClient>(api_key, model, endpoint);
+    auto memory = std::make_unique<InMemoryMemory>();
+    ToolRegistry tool_registry;
+
+    // Register the built-in calculator; registration fails on duplicates.
+    auto reg_res =
+        tool_registry.registerTool(std::make_unique<CalculatorTool>());
+    if(!reg_res.has_value())
+    {
+        std::cerr << "Failed to register tool: "
+                  << mw::errorMsg(reg_res.error()) << "\n";
+        return 1;
+    }
+
+    Agent agent(std::move(client), std::move(memory), tool_registry);
+    // The agent must also be allowed to use the registered tool.
+    auto allow_res = agent.allowTool("calculator");
+    if(!allow_res.has_value())
+    {
+        std::cerr << "Failed to allow tool: " << mw::errorMsg(allow_res.error())
+                  << "\n";
+        return 1;
+    }
+
+    // Default skill: system prompt plus the tool names it may use.
+    Skill default_skill{
+        "calculator_assistant",
+        "You are a helpful assistant that can perform calculations. "
+        "Use the calculator tool when needed.",
+        {"calculator"}};
+
+    agent.activateSkill(default_skill);
+
+    std::cout << "Agent Smith initialized.\n"
+              << "Endpoint: " << endpoint << "\n"
+              << "Model: " << model << "\n"
+              << "Type 'exit' to quit.\n";
+
+    // REPL: terminates on EOF (getline failure) or the literal "exit".
+    std::string user_input;
+    while(true)
+    {
+        std::cout << "You: ";
+        if(!std::getline(std::cin, user_input) || user_input == "exit")
+        {
+            break;
+        }
+
+        try
+        {
+            runTask(agent.run(user_input));
+        }
+        catch(const std::exception& e)
+        {
+            std::cerr << "Exception: " << e.what() << "\n";
+        }
+    }
+
+    return 0;
+}
diff --git a/src/memory.cpp b/src/memory.cpp
new file mode 100644
index 0000000..bbf9419
--- /dev/null
+++ b/src/memory.cpp
@@ -0,0 +1,16 @@
+#include "memory.hpp"
+
+// Append a message to the in-memory transcript.
+void InMemoryMemory::addMessage(const Message& msg)
+{
+    history_.push_back(msg);
+}
+
+// Return a copy of the transcript in insertion order.
+std::vector<Message> InMemoryMemory::getHistory() const
+{
+    return history_;
+}
+
+// Discard every recorded message.
+void InMemoryMemory::clear()
+{
+    history_.clear();
+}
diff --git a/src/memory.hpp b/src/memory.hpp
new file mode 100644
index 0000000..c87c828
--- /dev/null
+++ b/src/memory.hpp
@@ -0,0 +1,25 @@
+#pragma once
+
+#include <vector>
+
+#include "agent_smith_types.hpp"
+
+// Abstract conversation store: records Messages in order and hands back
+// the full transcript on demand.
+class Memory
+{
+public:
+    virtual ~Memory() = default;
+    // Append one message to the transcript.
+    virtual void addMessage(const Message& msg) = 0;
+    // Full transcript in insertion order (returned by value).
+    virtual std::vector<Message> getHistory() const = 0;
+    // Forget everything recorded so far.
+    virtual void clear() = 0;
+};
+
+// Memory implementation holding the transcript in a std::vector; no
+// persistence — contents are lost when the object is destroyed.
+class InMemoryMemory : public Memory
+{
+public:
+    void addMessage(const Message& msg) override;
+    std::vector<Message> getHistory() const override;
+    void clear() override;
+
+private:
+    std::vector<Message> history_;  // messages in insertion order
+};
diff --git a/src/memory_test.cpp b/src/memory_test.cpp
new file mode 100644
index 0000000..25e0524
--- /dev/null
+++ b/src/memory_test.cpp
@@ -0,0 +1,23 @@
+#include <gtest/gtest.h>
+
+#include "memory.hpp"
+
+// Messages come back in insertion order with their variant alternatives
+// intact.
+TEST(InMemoryMemoryTest, AddAndGetHistory)
+{
+    InMemoryMemory mem;
+    mem.addMessage(SystemMessage{"sys"});
+    mem.addMessage(UserMessage{"user"});
+
+    const auto recorded = mem.getHistory();
+    ASSERT_EQ(recorded.size(), 2);
+    EXPECT_TRUE(std::holds_alternative<SystemMessage>(recorded[0]));
+    EXPECT_TRUE(std::holds_alternative<UserMessage>(recorded[1]));
+}
+
+// clear() removes everything that was previously recorded.
+TEST(InMemoryMemoryTest, ClearHistory)
+{
+    InMemoryMemory mem;
+    mem.addMessage(UserMessage{"hi"});
+    mem.clear();
+    EXPECT_TRUE(mem.getHistory().empty());
+}
diff --git a/src/task.hpp b/src/task.hpp
new file mode 100644
index 0000000..c8671a3
--- /dev/null
+++ b/src/task.hpp
@@ -0,0 +1,81 @@
+#pragma once
+
+#include <coroutine>
+#include <exception>
+#include <utility>
+
+// Minimal eager coroutine task. The coroutine starts immediately
+// (initial_suspend is suspend_never) and, with the current synchronous
+// awaiter, runs to completion before the Task is even returned. The
+// result (or any exception that escaped the body) is retrieved via get().
+template <typename T> class Task
+{
+public:
+    struct promise_type;
+    using handle_type = std::coroutine_handle<promise_type>;
+
+    struct promise_type
+    {
+        T value;                      // result delivered by co_return
+        std::exception_ptr exception; // captured if the body throws
+
+        Task get_return_object()
+        {
+            return Task(handle_type::from_promise(*this));
+        }
+
+        // Eager start: do not suspend before running the body.
+        std::suspend_never initial_suspend()
+        {
+            return {};
+        }
+        // Suspend at the end so the frame (and thus the stored result)
+        // stays alive until the owning Task destroys it.
+        std::suspend_always final_suspend() noexcept
+        {
+            return {};
+        }
+
+        template <typename U> void return_value(U&& val)
+        {
+            value = std::forward<U>(val);
+        }
+
+        void unhandled_exception()
+        {
+            exception = std::current_exception();
+        }
+    };
+
+    Task(handle_type h) : coro(h) {}
+    // Rule of five: the Task uniquely owns the coroutine frame, so it is
+    // movable but not copyable. The original only declared the move
+    // constructor; move assignment was implicitly deleted, which made
+    // reassigning a Task (e.g. storing one in a member) impossible.
+    Task(const Task&) = delete;
+    Task& operator=(const Task&) = delete;
+    Task(Task&& t) noexcept : coro(std::exchange(t.coro, nullptr)) {}
+    Task& operator=(Task&& t) noexcept
+    {
+        if(this != &t)
+        {
+            if(coro)
+            {
+                coro.destroy();
+            }
+            coro = std::exchange(t.coro, nullptr);
+        }
+        return *this;
+    }
+    ~Task()
+    {
+        if(coro)
+        {
+            coro.destroy();
+        }
+    }
+
+    // Returns the result, rethrowing any exception captured from the body.
+    T get()
+    {
+        if(coro.promise().exception)
+        {
+            std::rethrow_exception(coro.promise().exception);
+        }
+        return coro.promise().value;
+    }
+
+    // Awaiter interface so a Task can itself be co_awaited.
+    bool await_ready() const noexcept
+    {
+        return coro.done();
+    }
+    // Fix: the previous void overload with an empty body would have left
+    // the awaiting coroutine suspended forever whenever await_ready was
+    // false. Returning false resumes the awaiter immediately, matching
+    // the synchronous semantics of this placeholder implementation.
+    bool await_suspend(std::coroutine_handle<>)
+    {
+        return false;
+    }
+    T await_resume()
+    {
+        return get();
+    }
+
+private:
+    handle_type coro;
+};
diff --git a/src/task_test.cpp b/src/task_test.cpp
new file mode 100644
index 0000000..4c0fcea
--- /dev/null
+++ b/src/task_test.cpp
@@ -0,0 +1,29 @@
+#include <stdexcept>
+#include <string>
+
+#include <gtest/gtest.h>
+
+#include "task.hpp"
+
+// Coroutine that completes immediately with a value.
+Task<int> asyncCompute()
+{
+    co_return 42;
+}
+
+// Coroutine whose body throws; the exception is captured by
+// promise_type::unhandled_exception and rethrown from Task::get().
+Task<int> asyncThrow()
+{
+    throw std::runtime_error("Task Error");
+    co_return 0; // Unreachable
+}
+
+TEST(TaskTest, ReturnsValue)
+{
+    auto task = asyncCompute();
+    EXPECT_EQ(task.get(), 42);
+}
+
+TEST(TaskTest, ThrowsException)
+{
+    auto task = asyncThrow();
+    EXPECT_THROW(task.get(), std::runtime_error);
+}
diff --git a/src/tool.cpp b/src/tool.cpp
new file mode 100644
index 0000000..2519430
--- /dev/null
+++ b/src/tool.cpp
@@ -0,0 +1,42 @@
+#include "tool.hpp"
+
+// Register a tool under its own name; the registry takes ownership on
+// success and the call fails if the name is already taken.
+mw::E<void> ToolRegistry::registerTool(std::unique_ptr<Tool> tool)
+{
+    // Cache the name: avoids a second virtual call, avoids relying on
+    // subtle evaluation-order rules when `tool` is moved in the same
+    // statement that calls tool->name(), and try_emplace turns the
+    // previous contains-then-insert double lookup into a single one.
+    // try_emplace does not move from its arguments when the key exists.
+    std::string name = tool->name();
+    auto [it, inserted] = tools_.try_emplace(std::move(name), std::move(tool));
+    if(!inserted)
+    {
+        return std::unexpected(mw::runtimeError(
+            std::format("Tool '{}' is already registered.", it->first)));
+    }
+    return {};
+}
+
+// Register a tool under "<ns>_<name>", letting tools with the same base
+// name coexist in different namespaces.
+mw::E<void> ToolRegistry::registerToolWithNamespace(const std::string& ns,
+                                                    std::unique_ptr<Tool> tool)
+{
+    std::string prefixed_name = std::format("{}_{}", ns, tool->name());
+    // Single lookup instead of contains-then-insert: try_emplace only
+    // inserts (and only moves its arguments) when the key is absent.
+    auto [it, inserted] =
+        tools_.try_emplace(std::move(prefixed_name), std::move(tool));
+    if(!inserted)
+    {
+        return std::unexpected(mw::runtimeError(
+            std::format("Tool '{}' is already registered.", it->first)));
+    }
+    return {};
+}
+
+// Look up a tool by registered name; returns a non-owning pointer, or
+// nullptr when no such tool exists.
+Tool* ToolRegistry::getTool(const std::string& name) const
+{
+    if(auto it = tools_.find(name); it != tools_.end())
+    {
+        return it->second.get();
+    }
+    return nullptr;
+}
+
+// Snapshot of all registered tools as non-owning pointers. Order follows
+// the unordered map and is therefore unspecified.
+std::vector<Tool*> ToolRegistry::getAllTools() const
+{
+    std::vector<Tool*> result;
+    result.reserve(tools_.size()); // one allocation instead of regrowth
+    for(const auto& [name, tool] : tools_)
+    {
+        result.push_back(tool.get());
+    }
+    return result;
+}
diff --git a/src/tool.hpp b/src/tool.hpp
new file mode 100644
index 0000000..4511e29
--- /dev/null
+++ b/src/tool.hpp
@@ -0,0 +1,36 @@
+#pragma once
+
+#include <format>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <mw/error.hpp>
+#include <nlohmann/json.hpp>
+
+#include "task.hpp"
+
+// Interface for a capability the agent can expose to the LLM. name() must
+// be unique within a ToolRegistry; parametersSchema() is the JSON Schema
+// advertised to the model; execute() receives the decoded arguments.
+class Tool
+{
+public:
+    virtual ~Tool() = default;
+    virtual std::string name() const = 0;
+    virtual std::string description() const = 0;
+    virtual nlohmann::json parametersSchema() const = 0;
+    // Run the tool; the returned string is fed back to the model, errors
+    // travel through the expected channel.
+    virtual Task<mw::E<std::string>>
+    execute(const nlohmann::json& arguments) = 0;
+};
+
+// Owning collection of tools keyed by (optionally namespace-prefixed)
+// name. Lookups return non-owning pointers valid for the registry's
+// lifetime.
+class ToolRegistry
+{
+public:
+    // Fails if a tool with the same name is already registered.
+    mw::E<void> registerTool(std::unique_ptr<Tool> tool);
+    // Registers under "<ns>_<name>" to avoid cross-namespace collisions.
+    mw::E<void> registerToolWithNamespace(const std::string& ns,
+                                          std::unique_ptr<Tool> tool);
+    // nullptr when no tool with that name exists.
+    Tool* getTool(const std::string& name) const;
+    std::vector<Tool*> getAllTools() const;
+
+private:
+    std::unordered_map<std::string, std::unique_ptr<Tool>> tools_;
+};
diff --git a/src/tool_test.cpp b/src/tool_test.cpp
new file mode 100644
index 0000000..f6f938a
--- /dev/null
+++ b/src/tool_test.cpp
@@ -0,0 +1,67 @@
+#include <gtest/gtest.h>
+#include <nlohmann/json.hpp>
+
+#include "tool.hpp"
+
+// Trivial Tool stub with a fixed name, empty schema and canned result,
+// used to exercise ToolRegistry bookkeeping.
+class MockTool : public Tool
+{
+public:
+    std::string name() const override
+    {
+        return "mock_tool";
+    }
+    std::string description() const override
+    {
+        return "Mock Tool";
+    }
+    nlohmann::json parametersSchema() const override
+    {
+        return nlohmann::json::object();
+    }
+    Task<mw::E<std::string>> execute(const nlohmann::json&) override
+    {
+        co_return "mock_result";
+    }
+};
+
+// A freshly registered tool is retrievable under its own name.
+TEST(ToolRegistryTest, RegisterAndGetTool)
+{
+    ToolRegistry registry;
+    auto res = registry.registerTool(std::make_unique<MockTool>());
+    ASSERT_TRUE(res.has_value());
+
+    Tool* tool = registry.getTool("mock_tool");
+    ASSERT_NE(tool, nullptr);
+    EXPECT_EQ(tool->name(), "mock_tool");
+}
+
+// Registering the same name twice must fail on the second attempt.
+TEST(ToolRegistryTest, DuplicateRegistrationFails)
+{
+    ToolRegistry registry;
+    ASSERT_TRUE(
+        registry.registerTool(std::make_unique<MockTool>()).has_value());
+    EXPECT_FALSE(
+        registry.registerTool(std::make_unique<MockTool>()).has_value());
+}
+
+// Namespaced registration stores the tool under "<ns>_<name>" while the
+// tool itself keeps reporting its base name.
+TEST(ToolRegistryTest, RegisterWithNamespace)
+{
+    ToolRegistry registry;
+    auto res =
+        registry.registerToolWithNamespace("ns", std::make_unique<MockTool>());
+    ASSERT_TRUE(res.has_value());
+
+    Tool* tool = registry.getTool("ns_mock_tool");
+    ASSERT_NE(tool, nullptr);
+    EXPECT_EQ(tool->name(), "mock_tool");
+}
+
+TEST(ToolRegistryTest, GetAllTools)
+{
+ ToolRegistry registry;
+ registry.registerTool(std::make_unique<MockTool>());
+
+ auto tools = registry.getAllTools();
+ ASSERT_EQ(tools.size(), 1);
+ EXPECT_EQ(tools[0]->name(), "mock_tool");
+}