Extensible AI Chat Structure
For an extensible chat application where users can choose different AI models (e.g., Gemini, OpenAI, and more in the future), a structured approach using Abstract Base Classes (ABCs) and the Factory Pattern is highly effective. This allows you to define a common interface for all AI providers and easily plug in new ones without modifying existing core logic.
Core Concepts
- Abstract Base Class (ABC): Defines the common methods that all AI providers must implement. This enforces a consistent interface.
- Concrete Implementations: Classes for each specific AI provider (e.g., `GeminiAIProvider`, `OpenAIAIProvider`) that inherit from the ABC and implement its methods.
- Factory Pattern: A mechanism to create instances of the AI providers based on a given type (e.g., "gemini", "openai") without exposing the creation logic to the client. This decouples the client from concrete provider classes.
- Configuration Management: Centralized handling of API keys and default model names, ideally loaded from environment variables for security.
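To make the shape of the solution concrete before the file-by-file walkthrough, here is the whole pattern in miniature (a deliberately simplified sketch; the full version below adds configuration, error handling, and per-provider message conversion):

```python
from abc import ABC, abstractmethod

class AbstractAIProvider(ABC):                      # the common interface (ABC)
    @abstractmethod
    def generate_response(self, messages: list) -> str: ...

class GeminiAIProvider(AbstractAIProvider):         # a concrete implementation
    def generate_response(self, messages: list) -> str:
        return "...call the Gemini API here..."

_PROVIDERS = {"gemini": GeminiAIProvider}           # the factory's lookup table

def get_provider(name: str) -> AbstractAIProvider:  # clients never name a
    return _PROVIDERS[name]()                       # concrete class directly

print(get_provider("gemini").generate_response([]))
```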
Code Structure
Here's the recommended file structure and content:
```
project_root/
├── config.py
├── ai_providers/
│   ├── __init__.py
│   ├── abstract_ai_provider.py
│   ├── gemini_provider.py
│   └── openai_provider.py
├── ai_factory.py
└── main.py
```
config.py
This file handles loading configuration, especially sensitive API keys, preferably from environment variables.
```python
import os


class Config:
    """Manages application configuration, including API keys and model defaults."""

    # OpenAI configuration
    OPENAI_API_KEY: str = os.getenv("OPENAI_API_KEY", "your_openai_api_key_here")
    OPENAI_DEFAULT_MODEL: str = "gpt-3.5-turbo"  # Or "gpt-4"

    # Gemini configuration
    GEMINI_API_KEY: str = os.getenv("GEMINI_API_KEY", "your_gemini_api_key_here")
    GEMINI_DEFAULT_MODEL: str = "gemini-pro"

    # Add configuration for other AI providers here. Attribute names must follow
    # the "<PROVIDER_NAME_UPPERCASED>_API_KEY" / "..._DEFAULT_MODEL" pattern,
    # because the factory looks them up by provider name (see ai_factory.py).
    # EXAMPLE_AI_API_KEY: str = os.getenv("EXAMPLE_AI_API_KEY", "your_example_api_key_here")
    # EXAMPLE_AI_DEFAULT_MODEL: str = "example-model"


# Example of how to set environment variables (e.g., in your shell or .env file):
# export OPENAI_API_KEY="sk-..."
# export GEMINI_API_KEY="AIza..."
```
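If you would rather keep keys in a local `.env` file than export them in every shell, a small loader can populate the environment before `Config` is evaluated. A minimal sketch, assuming the third-party `python-dotenv` package (`pip install python-dotenv`):

```python
# At the very top of config.py, before the Config class:
from dotenv import load_dotenv

# Reads KEY=value pairs from a .env file in the working directory into
# os.environ, so the os.getenv() calls in Config pick them up.
load_dotenv()
```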
ai_providers/abstract_ai_provider.py
Defines the common interface for all AI providers.
```python
from abc import ABC, abstractmethod
from typing import Dict, List


class AbstractAIProvider(ABC):
    """Abstract Base Class for all AI chat providers."""

    def __init__(self, api_key: str, model_name: str):
        self._api_key = api_key
        self._model_name = model_name

    @abstractmethod
    def generate_response(self, messages: List[Dict]) -> str:
        """
        Generates a response from the AI model based on the provided messages.

        Args:
            messages: A list of message dictionaries, typically in the format
                `[{"role": "user", "content": "Hello!"}, {"role": "assistant", "content": "Hi!"}]`.
                The exact format may vary slightly by provider, so concrete
                implementations should handle conversion if necessary.

        Returns:
            The AI's response as a string.
        """

    @abstractmethod
    def get_model_name(self) -> str:
        """Returns the name of the AI model being used."""

    # Add other common methods if needed, e.g., get_token_count, get_available_models
```
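Because the methods are declared with `@abstractmethod`, a provider that forgets to implement one cannot even be instantiated, so wiring mistakes surface immediately rather than at request time. A quick illustration (the `BrokenProvider` class is hypothetical, purely for demonstration):

```python
from ai_providers.abstract_ai_provider import AbstractAIProvider

class BrokenProvider(AbstractAIProvider):
    def get_model_name(self) -> str:  # generate_response is missing
        return self._model_name

BrokenProvider(api_key="x", model_name="y")
# TypeError: Can't instantiate abstract class BrokenProvider
# with abstract method generate_response
```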
ai_providers/openai_provider.py
Implements the `AbstractAIProvider` interface for OpenAI's API. (Install the `openai` package with `pip install openai`.)
```python
from typing import Dict, List

from openai import OpenAI

from .abstract_ai_provider import AbstractAIProvider


class OpenAIAIProvider(AbstractAIProvider):
    """Concrete implementation for OpenAI chat models."""

    def __init__(self, api_key: str, model_name: str):
        super().__init__(api_key, model_name)
        self._client = OpenAI(api_key=self._api_key)

    def generate_response(self, messages: List[Dict]) -> str:
        """
        Generates a response using OpenAI's chat completion API.
        The 'messages' format is directly compatible with OpenAI's API.
        """
        try:
            chat_completion = self._client.chat.completions.create(
                model=self._model_name,
                messages=messages,
                temperature=0.7,  # Example parameter
            )
            return chat_completion.choices[0].message.content
        except Exception as e:
            print(f"Error calling OpenAI API: {e}")
            return "Sorry, I couldn't get a response from OpenAI."

    def get_model_name(self) -> str:
        return self._model_name
```
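A provider can also be exercised on its own, outside the factory, which is handy for a quick smoke test. A minimal sketch, assuming `OPENAI_API_KEY` is set in your environment:

```python
import os

from ai_providers.openai_provider import OpenAIAIProvider

provider = OpenAIAIProvider(
    api_key=os.environ["OPENAI_API_KEY"],
    model_name="gpt-3.5-turbo",
)
print(provider.generate_response(
    [{"role": "user", "content": "Say hello in one word."}]
))
```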
ai_providers/gemini_provider.py
Implements the `AbstractAIProvider` interface for Google Gemini's API. (Install the `google-generativeai` package with `pip install google-generativeai`.)
```python
import google.generativeai as genai
from typing import Dict, List

from .abstract_ai_provider import AbstractAIProvider


class GeminiAIProvider(AbstractAIProvider):
    """Concrete implementation for Google Gemini chat models."""

    def __init__(self, api_key: str, model_name: str):
        super().__init__(api_key, model_name)
        genai.configure(api_key=self._api_key)
        self._model = genai.GenerativeModel(self._model_name)

    def generate_response(self, messages: List[Dict]) -> str:
        """
        Generates a response using Google Gemini's API.

        Note: Gemini expects messages as `{"role": ..., "parts": [...]}` dicts
        and calls the assistant role "model" rather than "assistant", so the
        OpenAI-style history is converted before the call. For a long-running
        conversation you could instead manage a chat session via `start_chat`
        and `send_message` (see the sketch after this code block).
        """
        try:
            # Convert OpenAI-style messages to Gemini's expected format.
            role_map = {"user": "user", "assistant": "model"}
            gemini_messages = [
                {"role": role_map.get(msg["role"], "user"), "parts": [msg["content"]]}
                for msg in messages
            ]
            # generate_content accepts the full converted history, so the
            # model sees prior turns rather than only the last user message.
            response = self._model.generate_content(gemini_messages)
            return response.text
        except Exception as e:
            print(f"Error calling Gemini API: {e}")
            return "Sorry, I couldn't get a response from Gemini."

    def get_model_name(self) -> str:
        return self._model_name
```
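If you keep one provider instance per conversation, Gemini can also track history for you via a chat session, instead of re-sending the converted history on every call. A minimal sketch using `start_chat`/`send_message` from `google-generativeai` (how you would hold the session per user is an assumption left to your application):

```python
import google.generativeai as genai

genai.configure(api_key="AIza...")  # your Gemini API key
model = genai.GenerativeModel("gemini-pro")

# The session accumulates history internally across send_message calls.
chat_session = model.start_chat(history=[])
print(chat_session.send_message("Hi, who are you?").text)
print(chat_session.send_message("What did I just ask you?").text)
```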
ai_factory.py
The factory that provides instances of AI providers based on a string identifier.
```python
from typing import Dict, Type

from ai_providers.abstract_ai_provider import AbstractAIProvider
from ai_providers.gemini_provider import GeminiAIProvider
from ai_providers.openai_provider import OpenAIAIProvider
from config import Config


class AIProviderFactory:
    """Factory for creating AI provider instances."""

    _providers: Dict[str, Type[AbstractAIProvider]] = {
        "openai": OpenAIAIProvider,
        "gemini": GeminiAIProvider,
        # Add new AI providers here
        # "example_ai": ExampleAIProvider,
    }

    @staticmethod
    def get_provider(provider_name: str) -> AbstractAIProvider:
        """
        Returns an instance of the specified AI provider.

        Args:
            provider_name: The name of the AI provider (e.g., "openai", "gemini").

        Returns:
            An instance of AbstractAIProvider.

        Raises:
            ValueError: If the provider name is not recognized.
        """
        provider_name = provider_name.lower()
        provider_class = AIProviderFactory._providers.get(provider_name)
        if not provider_class:
            raise ValueError(
                f"Unknown AI provider: {provider_name}. "
                f"Available providers: {list(AIProviderFactory._providers.keys())}"
            )
        # Retrieve the API key and model name from Config; the attribute names
        # must follow the "<PROVIDER_NAME_UPPERCASED>_API_KEY" convention.
        api_key = getattr(Config, f"{provider_name.upper()}_API_KEY")
        model_name = getattr(Config, f"{provider_name.upper()}_DEFAULT_MODEL")
        return provider_class(api_key=api_key, model_name=model_name)
```
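Client code then needs a single call and never references a concrete class:

```python
from ai_factory import AIProviderFactory

provider = AIProviderFactory.get_provider("gemini")
print(provider.get_model_name())  # e.g., "gemini-pro"
reply = provider.generate_response([{"role": "user", "content": "Hello!"}])
```

Because `get_provider` is typed as returning `AbstractAIProvider`, swapping `"gemini"` for `"openai"` changes nothing downstream.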
main.py
The main application logic, demonstrating how to use the factory to interact with different AI models.
```python
from typing import Dict, List

from ai_factory import AIProviderFactory


def chat_interface():
    """Simple command-line chat interface."""
    print("Welcome to the Multi-AI Chatbot!")
    print("Choose your AI provider (e.g., openai, gemini):")
    provider_choice = input("> ").strip().lower()

    try:
        ai_provider = AIProviderFactory.get_provider(provider_choice)
        print(f"Using {ai_provider.get_model_name()} via the {provider_choice} provider.")
    except ValueError as e:
        print(e)
        return

    messages: List[Dict] = []
    print("\nStart chatting! Type 'quit' or 'exit' to end.")

    while True:
        user_input = input("You: ").strip()
        if user_input.lower() in ["quit", "exit"]:
            print("Goodbye!")
            break

        messages.append({"role": "user", "content": user_input})
        print(f"AI ({ai_provider.get_model_name()}): Thinking...", end="\r")

        ai_response = ai_provider.generate_response(messages)
        print(f"AI ({ai_provider.get_model_name()}): {ai_response}")
        messages.append({"role": "assistant", "content": ai_response})

        # Keep the messages list short for demo purposes, or implement
        # proper history management.
        if len(messages) > 10:
            messages = messages[-8:]  # Keep the last 4 turns (user + assistant)


if __name__ == "__main__":
    # Set environment variables for API keys before running, e.g.:
    # export OPENAI_API_KEY="sk-..."
    # export GEMINI_API_KEY="AIza..."
    #
    # Or modify config.py directly for testing (not recommended for production).
    chat_interface()
```
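Because the chat loop depends only on the `AbstractAIProvider` interface, it can be exercised without network access by substituting a stub. A minimal sketch (`FakeProvider` is a hypothetical test helper, not part of the project):

```python
from typing import Dict, List

from ai_providers.abstract_ai_provider import AbstractAIProvider


class FakeProvider(AbstractAIProvider):
    """Echoes the last user message; no API key or network needed."""

    def generate_response(self, messages: List[Dict]) -> str:
        return f"echo: {messages[-1]['content']}"

    def get_model_name(self) -> str:
        return self._model_name


def test_fake_provider_roundtrip():
    provider = FakeProvider(api_key="unused", model_name="fake-1")
    reply = provider.generate_response([{"role": "user", "content": "ping"}])
    assert reply == "echo: ping"
```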
How to Add a New AI API (e.g., `ExampleAIProvider`)
1. Install SDK: Install the Python SDK for your new AI (e.g., `pip install example_sdk`).

2. Update `config.py`: Add the API key and model name configuration. The attribute prefix must match the factory key uppercased ("example_ai" → `EXAMPLE_AI_`):

```python
# config.py
class Config:
    # ... existing config
    EXAMPLE_AI_API_KEY: str = os.getenv("EXAMPLE_AI_API_KEY", "your_example_api_key_here")
    EXAMPLE_AI_DEFAULT_MODEL: str = "example-model-v1"
```
3. Create `ai_providers/example_ai_provider.py`:
   - Create a new file `example_ai_provider.py` in the `ai_providers` directory.
   - Import `AbstractAIProvider`.
   - Create a class `ExampleAIProvider` that inherits from `AbstractAIProvider`.
   - Implement the `__init__` and `generate_response` methods according to the new API's SDK. Remember to convert messages to the API's expected format if necessary.
   - Implement `get_model_name`.
```python
# ai_providers/example_ai_provider.py
# from example_sdk import ExampleClient  # Hypothetical SDK import
from typing import Dict, List

from .abstract_ai_provider import AbstractAIProvider


class ExampleAIProvider(AbstractAIProvider):
    def __init__(self, api_key: str, model_name: str):
        super().__init__(api_key, model_name)
        # self._client = ExampleClient(api_key=self._api_key)  # Initialize SDK client

    def generate_response(self, messages: List[Dict]) -> str:
        # Convert messages to the format expected by the SDK.
        # For simplicity, assume it takes the last user message as a string.
        last_user_message = next(
            (m["content"] for m in reversed(messages) if m["role"] == "user"), ""
        )
        if not last_user_message:
            return "No user message provided."
        try:
            # response = self._client.generate(model=self._model_name, prompt=last_user_message)
            # return response.text  # Or whatever the SDK returns
            return (
                f"Response from ExampleAI using model '{self._model_name}': "
                f"{last_user_message.upper()} (simulated)"
            )
        except Exception as e:
            print(f"Error calling ExampleAI API: {e}")
            return "Sorry, I couldn't get a response from ExampleAI."

    def get_model_name(self) -> str:
        return self._model_name
```
4. Update `ai_factory.py`:
   - Import your new provider class.
   - Add it to the `_providers` dictionary.
```python
# ai_factory.py
# ... existing imports
from ai_providers.example_ai_provider import ExampleAIProvider  # New import


class AIProviderFactory:
    _providers: Dict[str, Type[AbstractAIProvider]] = {
        "openai": OpenAIAIProvider,
        "gemini": GeminiAIProvider,
        "example_ai": ExampleAIProvider,  # Add the new provider here
    }
    # ... rest of the class
```
Now, when you run `main.py`, you can type `example_ai` as your choice, and the system will instantiate and use your new AI provider without any changes to `main.py` itself.