OpenAI SDK Usage
Mixroute API is fully compatible with the official OpenAI SDK. Simply change the base URL to use Mixroute API with your existing OpenAI SDK code.
Installation
Copy
Ask AI
pip install openai
Configuration
Python
Copy
Ask AI
from openai import OpenAI
client = OpenAI(
api_key="sk-xxxxxxxxxx",
base_url="https://console.mixroute.io/v1"
)
Node.js / TypeScript
Copy
Ask AI
import OpenAI from 'openai';
const client = new OpenAI({
apiKey: 'sk-xxxxxxxxxx',
baseURL: 'https://console.mixroute.io/v1'
});
Go
Copy
Ask AI
package main
import (
"github.com/openai/openai-go"
"github.com/openai/openai-go/option"
)
func main() {
client := openai.NewClient(
option.WithAPIKey("sk-xxxxxxxxxx"),
option.WithBaseURL("https://console.mixroute.io/v1"),
)
}
Chat Completions
Basic Usage
Copy
Ask AI
from openai import OpenAI
client = OpenAI(
api_key="sk-xxxxxxxxxx",
base_url="https://console.mixroute.io/v1"
)
response = client.chat.completions.create(
model="gpt-4o",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"}
]
)
print(response.choices[0].message.content)
Streaming
Copy
Ask AI
stream = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "Tell me a story"}],
stream=True
)
for chunk in stream:
if chunk.choices[0].delta.content:
print(chunk.choices[0].delta.content, end="")
Node.js Streaming
Copy
Ask AI
const stream = await client.chat.completions.create({
model: 'gpt-4o',
messages: [{ role: 'user', content: 'Tell me a story' }],
stream: true
});
for await (const chunk of stream) {
process.stdout.write(chunk.choices[0]?.delta?.content || '');
}
Using Different Models
Mixroute API supports models from multiple providers using the same SDK:
Copy
Ask AI
# OpenAI models
response = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "Hello"}]
)
# Anthropic Claude (via OpenAI format)
response = client.chat.completions.create(
model="claude-3-5-sonnet-20241022",
messages=[{"role": "user", "content": "Hello"}]
)
# Google Gemini (via OpenAI format)
response = client.chat.completions.create(
model="gemini-1.5-pro",
messages=[{"role": "user", "content": "Hello"}]
)
# Meta Llama
response = client.chat.completions.create(
model="llama-3.1-70b",
messages=[{"role": "user", "content": "Hello"}]
)
Vision (Multimodal)
Copy
Ask AI
response = client.chat.completions.create(
model="gpt-4o",
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": "What's in this image?"},
{
"type": "image_url",
"image_url": {"url": "https://console.mixroute.io/image.jpg"}
}
]
}
]
)
Base64 Image
Copy
Ask AI
import base64
with open("image.jpg", "rb") as f:
image_data = base64.b64encode(f.read()).decode()
response = client.chat.completions.create(
model="gpt-4o",
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": "Describe this image"},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{image_data}"}
}
]
}
]
)
Function Calling
Copy
Ask AI
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get current weather for a location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City name"
}
},
"required": ["location"]
}
}
}
]
response = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "What's the weather in Tokyo?"}],
tools=tools,
tool_choice="auto"
)
# Check if model wants to call a function
if response.choices[0].message.tool_calls:
tool_call = response.choices[0].message.tool_calls[0]
print(f"Function: {tool_call.function.name}")
print(f"Arguments: {tool_call.function.arguments}")
Embeddings
Copy
Ask AI
response = client.embeddings.create(
model="text-embedding-3-small",
input="The quick brown fox jumps over the lazy dog"
)
embedding = response.data[0].embedding
print(f"Dimensions: {len(embedding)}")
Batch Embeddings
Copy
Ask AI
response = client.embeddings.create(
model="text-embedding-3-small",
input=[
"First document",
"Second document",
"Third document"
]
)
for i, item in enumerate(response.data):
print(f"Document {i}: {len(item.embedding)} dimensions")
Image Generation
Copy
Ask AI
response = client.images.generate(
model="dall-e-3",
prompt="A serene Japanese garden with cherry blossoms",
size="1024x1024",
quality="standard",
n=1
)
image_url = response.data[0].url
print(image_url)
List Models
Copy
Ask AI
models = client.models.list()
for model in models.data:
print(f"{model.id} - {model.owned_by}")
Error Handling
Copy
Ask AI
from openai import OpenAI, APIError, RateLimitError, APIConnectionError
client = OpenAI(
api_key="sk-xxxxxxxxxx",
base_url="https://console.mixroute.io/v1"
)
try:
response = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "Hello"}]
)
except RateLimitError:
print("Rate limit exceeded. Please retry later.")
except APIConnectionError:
print("Connection error. Check your network.")
except APIError as e:
print(f"API error: {e.message}")
Async Usage
Copy
Ask AI
import asyncio
from openai import AsyncOpenAI
client = AsyncOpenAI(
api_key="sk-xxxxxxxxxx",
base_url="https://console.mixroute.io/v1"
)
async def main():
response = await client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "Hello!"}]
)
print(response.choices[0].message.content)
asyncio.run(main())
Environment Variables
Set environment variables instead of hardcoding:
Copy
Ask AI
export OPENAI_API_KEY="sk-xxxxxxxxxx"
export OPENAI_BASE_URL="https://console.mixroute.io/v1"
Copy
Ask AI
from openai import OpenAI
# Automatically reads from environment variables
client = OpenAI()
Best Practices
- Use environment variables for API keys
- Implement retry logic for transient errors
- Use streaming for better UX with long responses
- Batch requests when processing multiple items
- Monitor usage to stay within rate limits