#pragma once
#include <memory>
#include <string_view>
#include <vector>
#include <mw/error.hpp>
#include <mw/http_client.hpp>
#include <nlohmann/json.hpp>
#include "message.hpp"
#include "task.hpp"
// Abstract interface for a large-language-model chat client.
//
// Implementations take a conversation history plus a JSON schema describing
// the tools the model may call, and asynchronously produce the model's next
// message (or an error via mw::E). Task<> appears to be a coroutine-style
// async result type — NOTE(review): confirm against task.hpp.
class LlmClient
{
public:
    // Virtual destructor: required for safe delete-through-base of
    // polymorphic implementations.
    virtual ~LlmClient() = default;

    // Generate the model's next response.
    //
    // @param history                The full conversation so far, oldest first
    //                               (ordering presumed — verify against callers).
    // @param available_tools_schema JSON description of callable tools,
    //                               presumably in the provider's function/tool
    //                               schema format — confirm per implementation.
    // @return Asynchronously, either the generated Message or an error.
    virtual Task<mw::E<Message>>
    generateResponse(const std::vector<Message>& history,
                     const nlohmann::json& available_tools_schema) = 0;
};
// LlmClient implementation backed by the OpenAI chat-completions HTTP API.
//
// Owns its HTTP session (injectable for testing via the constructor) and the
// credentials/model/endpoint configuration used for each request.
class OpenAiClient : public LlmClient
{
public:
    // Construct a client for the OpenAI API.
    //
    // `explicit` because with every parameter after api_key defaulted, this
    // would otherwise act as an implicit converting constructor from
    // std::string (C++ Core Guidelines C.46).
    //
    // @param api_key  Secret API key sent with each request.
    // @param model    Model identifier; defaults to "gpt-4o".
    // @param endpoint API base URL; defaults to the public OpenAI endpoint.
    // @param session  Optional HTTP session to use (e.g. a mock in tests);
    //                 when nullptr, presumably a default session is created —
    //                 confirm in the .cpp.
    explicit OpenAiClient(
        std::string api_key, std::string model = "gpt-4o",
        std::string endpoint = "https://api.openai.com/v1",
        std::unique_ptr<mw::HTTPSessionInterface> session = nullptr);

    // See LlmClient::generateResponse.
    Task<mw::E<Message>>
    generateResponse(const std::vector<Message>& history,
                     const nlohmann::json& available_tools_schema) override;

private:
    std::string api_key_;
    std::string model_;
    std::string endpoint_;
    std::unique_ptr<mw::HTTPSessionInterface> session_;

    // Parse a raw HTTP response body from the API into an AssistantMessage,
    // or an error on malformed/unexpected payloads.
    mw::E<AssistantMessage> parseResponse(std::string_view response_body) const;
};