Replacing OpenAI ChatCompletion with Completion()
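litellm's completion() is intended as a drop-in replacement for openai.ChatCompletion.create(): the model and messages arguments carry over, and the response follows the OpenAI format. A minimal before/after sketch (the commented-out call is the pre-1.0 openai SDK interface, shown here only for comparison):

# before: openai SDK (pre-1.0 interface)
# import openai
# response = openai.ChatCompletion.create(
#     model="gpt-3.5-turbo",
#     messages=[{"content": "Hello, how are you?", "role": "user"}]
# )

# after: litellm
from litellm import completion
response = completion(
    model="gpt-3.5-turbo",
    messages=[{"content": "Hello, how are you?", "role": "user"}]
)

Completion() - Quick Start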
import os
from litellm import completion
# openai configs
os.environ["OPENAI_API_KEY"] = ""

# azure openai configs
os.environ["AZURE_API_KEY"] = ""
os.environ["AZURE_API_BASE"] = "https://openai-gpt-4-test-v-1.openai.azure.com/"
os.environ["AZURE_API_VERSION"] = "2023-05-15"

# openai call
response = completion(
    model="gpt-3.5-turbo",
    messages=[{"content": "Hello, how are you?", "role": "user"}]
)
print("OpenAI Response\n")
print(response)

# azure call
response = completion(
    model="azure/<your-azure-deployment>",
    messages=[{"content": "Hello, how are you?", "role": "user"}]
)
print("Azure Response\n")
print(response)
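Because both responses come back in the OpenAI format, the generated text can be read the same way regardless of provider. A minimal sketch, assuming the standard choices[0].message.content layout:

# works for both the OpenAI and the Azure response above,
# since litellm returns responses in the OpenAI format
print(response.choices[0].message.content)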
Completion() with Streaming
import os
from litellm import completion
# openai configs
os.environ["OPENAI_API_KEY"] = ""

# azure openai configs
os.environ["AZURE_API_KEY"] = ""
os.environ["AZURE_API_BASE"] = "https://openai-gpt-4-test-v-1.openai.azure.com/"
os.environ["AZURE_API_VERSION"] = "2023-05-15"

# openai call
response = completion(
    model="gpt-3.5-turbo",
    messages=[{"content": "Hello, how are you?", "role": "user"}],
    stream=True
)
print("OpenAI Streaming response")
for chunk in response:
    print(chunk)

# azure call
response = completion(
    model="azure/<your-azure-deployment>",
    messages=[{"content": "Hello, how are you?", "role": "user"}],
    stream=True
)
print("Azure Streaming response")
for chunk in response:
    print(chunk)
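To print only the generated text instead of the raw chunk objects, the incremental tokens can be pulled out of each chunk's delta. A minimal sketch, assuming the chunks follow the OpenAI streaming layout (choices[0].delta.content):

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"content": "Hello, how are you?", "role": "user"}],
    stream=True
)
for chunk in response:
    # content can be None on the final chunk, so fall back to an empty string
    print(chunk.choices[0].delta.content or "", end="")
print()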
Completion() with Streaming + Async
import os
import asyncio
from litellm import acompletion

# openai configs
os.environ["OPENAI_API_KEY"] = ""

# azure openai configs
os.environ["AZURE_API_KEY"] = ""
os.environ["AZURE_API_BASE"] = "https://openai-gpt-4-test-v-1.openai.azure.com/"
os.environ["AZURE_API_VERSION"] = "2023-05-15"

async def main():
    # openai call - acompletion is a coroutine, so it must be awaited
    response = await acompletion(
        model="gpt-3.5-turbo",
        messages=[{"content": "Hello, how are you?", "role": "user"}],
        stream=True
    )
    async for chunk in response:
        print(chunk)

    # azure call
    response = await acompletion(
        model="azure/<your-azure-deployment>",
        messages=[{"content": "Hello, how are you?", "role": "user"}],
        stream=True
    )
    async for chunk in response:
        print(chunk)

asyncio.run(main())
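Since both calls are coroutines, they can also be dispatched concurrently, which is the main benefit of the async API. A minimal sketch using asyncio.gather; the helper name stream_model is just for illustration, and it assumes the OpenAI streaming layout for the chunks:

import asyncio
from litellm import acompletion

async def stream_model(model):
    # illustrative helper: stream one model's reply and return the full text
    response = await acompletion(
        model=model,
        messages=[{"content": "Hello, how are you?", "role": "user"}],
        stream=True
    )
    text = ""
    async for chunk in response:
        text += chunk.choices[0].delta.content or ""
    return text

async def main():
    # run the OpenAI and Azure calls at the same time
    openai_text, azure_text = await asyncio.gather(
        stream_model("gpt-3.5-turbo"),
        stream_model("azure/<your-azure-deployment>"),
    )
    print("OpenAI:", openai_text)
    print("Azure:", azure_text)

asyncio.run(main())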
Completion() with Multi-Threading
import os
import threading
from litellm import completion
# Function to make a completion call
def make_completion(model, messages):
    response = completion(
        model=model,
        messages=messages,
        stream=True
    )
    print(f"Response for {model}: {response}")

# Set your API keys
os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"
os.environ["AZURE_API_KEY"] = "YOUR_AZURE_API_KEY"

# Define the messages for the completions
messages = [{"content": "Hello, how are you?", "role": "user"}]

# Create threads for making the completion calls
thread1 = threading.Thread(target=make_completion, args=("gpt-3.5-turbo", messages))
thread2 = threading.Thread(target=make_completion, args=("azure/your-azure-deployment", messages))

# Start both threads
thread1.start()
thread2.start()

# Wait for both threads to finish
thread1.join()
thread2.join()

print("Both completions are done.")