Appearance
OpenRouter 对话模型 API 文档
Z.ai: GLM 5
js
import { OpenRouter } from "@openrouter/sdk";
const openrouter = new OpenRouter({
apiKey: "<OPENROUTER_API_KEY>"
});
// Stream the response to get reasoning tokens in usage
const stream = await openrouter.chat.send({
model: "z-ai/glm-5",
messages: [
{
role: "user",
content: "How many r's are in the word 'strawberry'?"
}
],
stream: true
});
let response = "";
for await (const chunk of stream) {
const content = chunk.choices[0]?.delta?.content;
if (content) {
response += content;
process.stdout.write(content);
}
// Usage information comes in the final chunk
if (chunk.usage) {
console.log("\nReasoning tokens:", chunk.usage.reasoningTokens);
}
}xAI: Grok 4 Fast
py
from openai import OpenAI
client = OpenAI(
base_url="https://openrouter.ai/api/v1",
api_key="<OPENROUTER_API_KEY>",
)
completion = client.chat.completions.create(
extra_headers={
"HTTP-Referer": "<YOUR_SITE_URL>", # Optional. Site URL for rankings on openrouter.ai.
"X-OpenRouter-Title": "<YOUR_SITE_NAME>", # Optional. Site title for rankings on openrouter.ai.
},
extra_body={},
model="x-ai/grok-4-fast",
messages=[
{
"role": "user",
"content": [
{
"type": "text",
"text": "What is in this image?"
},
{
"type": "image_url",
"image_url": {
"url": "https://live.staticflickr.com/3851/14825276609_098cac593d_b.jpg"
}
}
]
}
]
)
print(completion.choices[0].message.content)Google: Gemini 3.1 Flash Lite Preview
py
from openai import OpenAI
client = OpenAI(
base_url="https://openrouter.ai/api/v1",
api_key="<OPENROUTER_API_KEY>",
)
# First API call with reasoning
response = client.chat.completions.create(
model="google/gemini-3.1-flash-lite-preview",
messages=[
{
"role": "user",
"content": "How many r's are in the word 'strawberry'?"
}
],
extra_body={"reasoning": {"enabled": True}}
)
# Extract the assistant message with reasoning_details
response = response.choices[0].message
# Preserve the assistant message with reasoning_details
messages = [
{"role": "user", "content": "How many r's are in the word 'strawberry'?"},
{
"role": "assistant",
"content": response.content,
"reasoning_details": response.reasoning_details # Pass back unmodified
},
{"role": "user", "content": "Are you sure? Think carefully."}
]
# Second API call - model continues reasoning from where it left off
response2 = client.chat.completions.create(
model="google/gemini-3.1-flash-lite-preview",
messages=messages,
extra_body={"reasoning": {"enabled": True}}
)Qwen: Qwen3.5-9B
py
from openai import OpenAI
client = OpenAI(
base_url="https://openrouter.ai/api/v1",
api_key="<OPENROUTER_API_KEY>",
)
# First API call with reasoning
response = client.chat.completions.create(
model="qwen/qwen3.5-9b",
messages=[
{
"role": "user",
"content": "How many r's are in the word 'strawberry'?"
}
],
extra_body={"reasoning": {"enabled": True}}
)
# Extract the assistant message with reasoning_details
response = response.choices[0].message
# Preserve the assistant message with reasoning_details
messages = [
{"role": "user", "content": "How many r's are in the word 'strawberry'?"},
{
"role": "assistant",
"content": response.content,
"reasoning_details": response.reasoning_details # Pass back unmodified
},
{"role": "user", "content": "Are you sure? Think carefully."}
]
# Second API call - model continues reasoning from where it left off
response2 = client.chat.completions.create(
model="qwen/qwen3.5-9b",
messages=messages,
extra_body={"reasoning": {"enabled": True}}
)ByteDance Seed: Seed-2.0-Lite
py
from openai import OpenAI
client = OpenAI(
base_url="https://openrouter.ai/api/v1",
api_key="<OPENROUTER_API_KEY>",
)
# First API call with reasoning
response = client.chat.completions.create(
model="bytedance-seed/seed-2.0-lite",
messages=[
{
"role": "user",
"content": "How many r's are in the word 'strawberry'?"
}
],
extra_body={"reasoning": {"enabled": True}}
)
# Extract the assistant message with reasoning_details
response = response.choices[0].message
# Preserve the assistant message with reasoning_details
messages = [
{"role": "user", "content": "How many r's are in the word 'strawberry'?"},
{
"role": "assistant",
"content": response.content,
"reasoning_details": response.reasoning_details # Pass back unmodified
},
{"role": "user", "content": "Are you sure? Think carefully."}
]
# Second API call - model continues reasoning from where it left off
response2 = client.chat.completions.create(
model="bytedance-seed/seed-2.0-lite",
messages=messages,
extra_body={"reasoning": {"enabled": True}}
)OpenAI: GPT-4o
py
from openai import OpenAI

# OpenRouter exposes an OpenAI-compatible endpoint; point the stock client at it.
client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key="<OPENROUTER_API_KEY>",
)

completion = client.chat.completions.create(
    extra_headers={
        "HTTP-Referer": "<YOUR_SITE_URL>",  # Optional. Site URL for rankings on openrouter.ai.
        # FIX: OpenRouter's documented attribution header is "X-Title",
        # not "X-OpenRouter-Title".
        "X-Title": "<YOUR_SITE_NAME>",  # Optional. Site title for rankings on openrouter.ai.
    },
    extra_body={},
    model="openai/gpt-4o",
    messages=[
        {
            "role": "user",
            # Multimodal message: one text part plus one image_url part.
            "content": [
                {
                    "type": "text",
                    "text": "What is in this image?"
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://live.staticflickr.com/3851/14825276609_098cac593d_b.jpg"
                    }
                }
            ]
        }
    ]
)
print(completion.choices[0].message.content)