OpenAI SDK Usage
MixRoute is fully compatible with the official OpenAI SDK. Simply change the base URL to use MixRoute with your existing OpenAI SDK code.
Installation
pip install openai
Configuration
Python
from openai import OpenAI

client = OpenAI(
    api_key="sk-xxxxxxxxxx",
    base_url="https://api.mixroute.ai/v1"
)
Node.js / TypeScript
import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: 'sk-xxxxxxxxxx',
  baseURL: 'https://api.mixroute.ai/v1'
});
Go
package main

import (
    "github.com/openai/openai-go"
    "github.com/openai/openai-go/option"
)

func main() {
    client := openai.NewClient(
        option.WithAPIKey("sk-xxxxxxxxxx"),
        option.WithBaseURL("https://api.mixroute.ai/v1"),
    )
    _ = client // use client to make requests; Go requires declared variables to be used
}
Chat Completions
Basic Usage
from openai import OpenAI

client = OpenAI(
    api_key="sk-xxxxxxxxxx",
    base_url="https://api.mixroute.ai/v1"
)

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"}
    ]
)
print(response.choices[0].message.content)
Streaming
stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Tell me a story"}],
    stream=True
)
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
Node.js Streaming
const stream = await client.chat.completions.create({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Tell me a story' }],
  stream: true
});
for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content || '');
}
Using Different Models
MixRoute supports models from multiple providers through the same SDK:
# OpenAI models
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}]
)

# Anthropic Claude (via OpenAI format)
response = client.chat.completions.create(
    model="claude-3-5-sonnet-20241022",
    messages=[{"role": "user", "content": "Hello"}]
)

# Google Gemini (via OpenAI format)
response = client.chat.completions.create(
    model="gemini-1.5-pro",
    messages=[{"role": "user", "content": "Hello"}]
)

# Meta Llama
response = client.chat.completions.create(
    model="llama-3.1-70b",
    messages=[{"role": "user", "content": "Hello"}]
)
Vision (Multimodal)
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's in this image?"},
                {
                    "type": "image_url",
                    "image_url": {"url": "https://api.mixroute.ai/image.jpg"}
                }
            ]
        }
    ]
)
Base64 Image
import base64

with open("image.jpg", "rb") as f:
    image_data = base64.b64encode(f.read()).decode()

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image"},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{image_data}"}
                }
            ]
        }
    ]
)
Function Calling
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get current weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "City name"
                    }
                },
                "required": ["location"]
            }
        }
    }
]

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What's the weather in Tokyo?"}],
    tools=tools,
    tool_choice="auto"
)

# Check if model wants to call a function
if response.choices[0].message.tool_calls:
    tool_call = response.choices[0].message.tool_calls[0]
    print(f"Function: {tool_call.function.name}")
    print(f"Arguments: {tool_call.function.arguments}")
Embeddings
response = client.embeddings.create(
    model="text-embedding-3-small",
    input="The quick brown fox jumps over the lazy dog"
)
embedding = response.data[0].embedding
print(f"Dimensions: {len(embedding)}")
Batch Embeddings
response = client.embeddings.create(
    model="text-embedding-3-small",
    input=[
        "First document",
        "Second document",
        "Third document"
    ]
)
for i, item in enumerate(response.data):
    print(f"Document {i}: {len(item.embedding)} dimensions")
Image Generation
response = client.images.generate(
    model="dall-e-3",
    prompt="A serene Japanese garden with cherry blossoms",
    size="1024x1024",
    quality="standard",
    n=1
)
image_url = response.data[0].url
print(image_url)
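To save the image locally, you can request base64 data instead of a URL. A minimal sketch assuming MixRoute passes through the OpenAI Images API's response_format="b64_json" option:
import base64

response = client.images.generate(
    model="dall-e-3",
    prompt="A serene Japanese garden with cherry blossoms",
    size="1024x1024",
    response_format="b64_json",
    n=1
)
# Decode the base64 payload and write it to disk
with open("garden.png", "wb") as f:
    f.write(base64.b64decode(response.data[0].b64_json))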
List Models
models = client.models.list()
for model in models.data:
    print(f"{model.id} - {model.owned_by}")
Error Handling
from openai import OpenAI, APIError, RateLimitError, APIConnectionError

client = OpenAI(
    api_key="sk-xxxxxxxxxx",
    base_url="https://api.mixroute.ai/v1"
)

try:
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello"}]
    )
except RateLimitError:
    print("Rate limit exceeded. Please retry later.")
except APIConnectionError:
    print("Connection error. Check your network.")
except APIError as e:
    print(f"API error: {e.message}")
Async Usage
import asyncio
from openai import AsyncOpenAI

client = AsyncOpenAI(
    api_key="sk-xxxxxxxxxx",
    base_url="https://api.mixroute.ai/v1"
)

async def main():
    response = await client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello!"}]
    )
    print(response.choices[0].message.content)

asyncio.run(main())
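One advantage of the async client is fanning out several requests concurrently. A minimal sketch using asyncio.gather with the client defined above:
async def ask(question: str) -> str:
    response = await client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": question}]
    )
    return response.choices[0].message.content

async def main():
    # All three requests run concurrently rather than one after another
    answers = await asyncio.gather(
        ask("What is Python?"),
        ask("What is Go?"),
        ask("What is Rust?")
    )
    for answer in answers:
        print(answer)

asyncio.run(main())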
Environment Variables
Set environment variables instead of hardcoding credentials:
export OPENAI_API_KEY="sk-xxxxxxxxxx"
export OPENAI_BASE_URL="https://api.mixroute.ai/v1"
from openai import OpenAI
# Automatically reads from environment variables
client = OpenAI()
Best Practices
- Use environment variables for API keys
- Implement retry logic for transient errors (see the sketch after this list)
- Use streaming for better UX with long responses
- Batch requests when processing multiple items
- Monitor usage to stay within rate limits
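For the retry item above, a minimal sketch with exponential backoff, reusing the client from earlier; max_retries and the delay schedule are illustrative values to tune for your workload:
import time
from openai import APIConnectionError, RateLimitError

def create_with_retry(client, max_retries=5, **kwargs):
    # Back off exponentially between attempts: 1s, 2s, 4s, ...
    for attempt in range(max_retries):
        try:
            return client.chat.completions.create(**kwargs)
        except (RateLimitError, APIConnectionError):
            if attempt == max_retries - 1:
                raise
            time.sleep(2 ** attempt)

response = create_with_retry(
    client,
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}]
)
Note that the official SDK also retries certain errors automatically; you can control that behavior with the max_retries argument to the OpenAI constructor.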