autogen_ext.models.azure#

class AzureAIChatCompletionClient(**kwargs: Unpack[AzureAIChatCompletionClientConfig])[源代码]#

基础类:ChatCompletionClient

用于托管在Azure AI Foundry或GitHub Models上的模型的聊天完成客户端。更多信息请参见这里

Parameters:
  • endpoint (str) – 使用的端点。必填。

  • credential (Union[AzureKeyCredential, AsyncTokenCredential]) – 使用的凭证。必需。

  • model_info (ModelInfo) – 模型的家族和能力。 必需。

  • model (str) – 模型的名称。 如果模型托管在GitHub Models上,则需要提供。

  • frequency_penalty – (可选,浮点数)

  • presence_penalty – (可选, 浮点数)

  • temperature – (可选,浮点数)

  • top_p – (可选, float)

  • max_tokens – (可选, int)

  • response_format – (可选, Literal["text", "json_object"])

  • stop – (可选, List[str])

  • tools – (可选, List[ChatCompletionsToolDefinition])

  • tool_choice – (可选, Union[str, ChatCompletionsToolChoicePreset, ChatCompletionsNamedToolChoice])

  • seed – (可选, int)

  • model_extras – (可选, Dict[str, Any])

要使用此客户端,您必须安装 azure-ai-inference 扩展:

pip install "autogen-ext[azure]"

以下代码片段展示了如何使用客户端:

import asyncio
from azure.core.credentials import AzureKeyCredential
from autogen_ext.models.azure import AzureAIChatCompletionClient
from autogen_core.models import UserMessage


async def main():
    """Create an Azure AI chat completion client and issue one request."""
    # The model's capabilities must be declared explicitly via model_info.
    capabilities = {
        "json_output": False,
        "function_calling": False,
        "vision": False,
        "family": "unknown",
    }
    client = AzureAIChatCompletionClient(
        endpoint="endpoint",
        credential=AzureKeyCredential("api_key"),
        model_info=capabilities,
    )

    # Send a single user message and print the completion result.
    question = UserMessage(content="What is the capital of France?", source="user")
    result = await client.create([question])
    print(result)


# Script entry point: run the async example to completion.
if __name__ == "__main__":
    asyncio.run(main())
actual_usage() RequestUsage[源代码]#
add_usage(usage: RequestUsage) [源代码]#
property capabilities: ModelInfo#
async close() [源代码]#
count_tokens(messages: Sequence[Annotated[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]], *, tools: Sequence[Tool | ToolSchema] = []) int[源代码]#
async create(messages: Sequence[Annotated[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]], *, tools: Sequence[Tool | ToolSchema] = [], json_output: bool | None = None, extra_create_args: Mapping[str, Any] = {}, cancellation_token: CancellationToken | None = None) CreateResult[源代码]#
async create_stream(messages: Sequence[Annotated[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]], *, tools: Sequence[Tool | ToolSchema] = [], json_output: bool | None = None, extra_create_args: Mapping[str, Any] = {}, cancellation_token: CancellationToken | None = None) AsyncGenerator[str | CreateResult, None][源代码]#
property model_info: ModelInfo#
remaining_tokens(messages: Sequence[Annotated[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]], *, tools: Sequence[Tool | ToolSchema] = []) int[源代码]#
total_usage() RequestUsage[源代码]#
class AzureAIChatCompletionClientConfig[源代码]#

基类:dict

credential: AzureKeyCredential | AsyncTokenCredential#
endpoint: str#
frequency_penalty: float | None#
max_tokens: int | None#
model: str | None#
model_extras: Dict[str, Any] | None#
model_info: ModelInfo#
presence_penalty: float | None#
response_format: Literal['text', 'json_object'] | None#
seed: int | None#
stop: List[str] | None#
temperature: float | None#
tool_choice: str | ChatCompletionsToolChoicePreset | ChatCompletionsNamedToolChoice | None#
tools: List[ChatCompletionsToolDefinition] | None#
top_p: float | None#