实时端点
使用此功能可在 Azure 和 OpenAI 之间进行负载均衡。
代理使用
将模型添加到配置中
- OpenAI
- OpenAI + Azure
# Proxy config — single OpenAI realtime model.
model_list:
  - model_name: openai-gpt-4o-realtime-audio   # public alias requested by clients
    litellm_params:
      model: openai/gpt-4o-realtime-preview-2024-10-01
      api_key: os.environ/OPENAI_API_KEY       # resolved from the environment at runtime
# Proxy config — Azure + OpenAI realtime deployments.
# NOTE(review): for the proxy to load-balance across both deployments they
# typically need the SAME model_name — confirm against the LiteLLM routing docs.
model_list:
  - model_name: gpt-4o
    litellm_params:
      model: azure/gpt-4o-realtime-preview
      api_key: os.environ/AZURE_SWEDEN_API_KEY     # resolved from the environment
      api_base: os.environ/AZURE_SWEDEN_API_BASE
  - model_name: openai-gpt-4o-realtime-audio
    litellm_params:
      model: openai/gpt-4o-realtime-preview-2024-10-01
      api_key: os.environ/OPENAI_API_KEY
启动代理
# Start the LiteLLM proxy with the config above.
litellm --config /path/to/config.yaml
# Proxy listens on http://0.0.0.0:4000 (LiteLLM's default port — matches the test script below)
测试
使用 node 运行此脚本 - node test.js
// test.js
// Connects to the LiteLLM proxy realtime endpoint over WebSocket,
// requests a text-only response, and logs every server event.
const WebSocket = require("ws");

// Proxy realtime endpoint; `model` must match a `model_name` from the proxy config.
const url = "ws://0.0.0.0:4000/v1/realtime?model=openai-gpt-4o-realtime-audio";
// Direct Azure endpoint (bypasses the proxy), kept for reference:
// const url = "wss://my-endpoint-sweden-berri992.openai.azure.com/openai/realtime?api-version=2024-10-01-preview&deployment=gpt-4o-realtime-preview";

// Never hard-code secrets in source — read the key from the environment.
const apiKey = process.env.AZURE_API_KEY;
if (!apiKey) {
  console.error("错误: 未设置 AZURE_API_KEY 环境变量。");
  process.exit(1);
}

const ws = new WebSocket(url, {
  headers: {
    "api-key": apiKey,
    "OpenAI-Beta": "realtime=v1", // required beta header for the realtime API
  },
});

// On connect, ask the server to create a text response.
ws.on("open", function open() {
  console.log("已连接到服务器。");
  ws.send(JSON.stringify({
    type: "response.create",
    response: {
      modalities: ["text"],
      instructions: "请协助用户。",
    },
  }));
});

// Every server event arrives as a JSON-encoded message; print it decoded.
ws.on("message", function incoming(message) {
  console.log(JSON.parse(message.toString()));
});

ws.on("error", function handleError(error) {
  console.error("错误: ", error);
});