
Mock Completion() Responses - Save Testing Costs 💰

For testing purposes, you can pass mock_response to completion() to mock calls to the completion endpoint.

This returns a response object containing the default response (it also works with streaming), without calling the LLM API.

Quick Start

from litellm import completion 

model = "gpt-3.5-turbo"
messages = [{"role": "user", "content": "This is a test request"}]

completion(model=model, messages=messages, mock_response="It's simple to use and easy to get started")

Streaming

from litellm import completion

model = "gpt-3.5-turbo"
messages = [{"role": "user", "content": "Hey, I'm a mock request"}]
complete_response = ""  # accumulator for the streamed text
response = completion(model=model, messages=messages, stream=True, mock_response="It's simple to use and easy to get started")
for chunk in response:
    print(chunk)  # {'choices': [{'delta': {'role': 'assistant', 'content': 'It'}, 'finish_reason': None}]}
    complete_response += chunk["choices"][0]["delta"]["content"] or ""  # final chunk may carry no content
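
The same mock_response flag works with LiteLLM's async entry point as well. A minimal sketch, assuming acompletion (LiteLLM's async counterpart to completion) accepts the same arguments:

import asyncio
from litellm import acompletion

async def main():
    # mock_response short-circuits the call here too; no LLM API is hit.
    response = await acompletion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hey, I'm a mock request"}],
        mock_response="It's simple to use and easy to get started",
    )
    print(response.choices[0].message.content)

asyncio.run(main())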

Mock Response Object (Non-Streaming)

{
  "choices": [
    {
      "finish_reason": "stop",
      "index": 0,
      "message": {
        "content": "This is a mock request",
        "role": "assistant",
        "logprobs": null
      }
    }
  ],
  "created": 1694459929.4496052,
  "model": "MockResponse",
  "usage": {
    "prompt_tokens": null,
    "completion_tokens": null,
    "total_tokens": null
  }
}
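
The returned object behaves like a normal completion response, so both dictionary-style and attribute-style access work on it. A quick illustration against the object above:

from litellm import completion

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "This is a test request"}],
    mock_response="This is a mock request",
)

# Dictionary-style access is what the pytest example below uses;
# attribute-style access works on the same object.
print(response["choices"][0]["message"]["content"])  # "This is a mock request"
print(response.choices[0].message.content)           # same value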

Building a pytest function with completion() and mock_response

from litellm import completion
import pytest

def test_completion_openai():
    try:
        response = completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Why is LiteLLM amazing?"}],
            mock_response="LiteLLM is awesome"
        )
        # Add any assertions here to check the response
        print(response)
        assert response['choices'][0]['message']['content'] == "LiteLLM is awesome"
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
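
You can cover the streaming path the same way, by assembling the chunks and asserting on the final text. A sketch using only the features shown above; the trailing or "" is a defensive assumption in case the final chunk carries no content:

from litellm import completion
import pytest

def test_completion_openai_stream():
    try:
        complete_response = ""
        response = completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Why is LiteLLM amazing?"}],
            mock_response="LiteLLM is awesome",
            stream=True,
        )
        for chunk in response:
            complete_response += chunk["choices"][0]["delta"]["content"] or ""
        # The streamed pieces should reassemble into the full mock string
        assert complete_response == "LiteLLM is awesome"
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")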