diff --git a/src/llama_stack_client/types/response_object.py b/src/llama_stack_client/types/response_object.py index 0088d363..ba14c71e 100644 --- a/src/llama_stack_client/types/response_object.py +++ b/src/llama_stack_client/types/response_object.py @@ -40,6 +40,9 @@ "OutputOpenAIResponseOutputMessageMcpListTools", "OutputOpenAIResponseOutputMessageMcpListToolsTool", "OutputOpenAIResponseMcpApprovalRequest", + "OutputOpenAIResponseReasoningItem", + "OutputOpenAIResponseReasoningItemContent", + "OutputOpenAIResponseReasoningItemSummary", "Error", "IncompleteDetails", "Prompt", @@ -403,12 +406,49 @@ class OutputOpenAIResponseMcpApprovalRequest(BaseModel): type: Optional[Literal["mcp_approval_request"]] = None +class OutputOpenAIResponseReasoningItemContent(BaseModel): + """Reasoning text content from the model's chain-of-thought.""" + + text: str + + type: Optional[Literal["reasoning_text"]] = None + + +class OutputOpenAIResponseReasoningItemSummary(BaseModel): + """Summary of the model's reasoning output.""" + + text: str + + type: Optional[Literal["summary_text"]] = None + + +class OutputOpenAIResponseReasoningItem(BaseModel): + """Reasoning output item for OpenAI responses. + + Contains the model's chain-of-thought reasoning, either as raw content + (open-source models) or as a summary (closed-source models). 
+ """ + + id: str + + summary: List[OutputOpenAIResponseReasoningItemSummary] + + content: Optional[List[OutputOpenAIResponseReasoningItemContent]] = None + + encrypted_content: Optional[str] = None + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + + type: Optional[Literal["reasoning"]] = None + + Output: TypeAlias = Annotated[ Union[ OutputOpenAIResponseMessageOutput, OutputOpenAIResponseOutputMessageWebSearchToolCall, OutputOpenAIResponseOutputMessageFileSearchToolCall, OutputOpenAIResponseOutputMessageFunctionToolCall, + OutputOpenAIResponseReasoningItem, OutputOpenAIResponseOutputMessageMcpCall, OutputOpenAIResponseOutputMessageMcpListTools, OutputOpenAIResponseMcpApprovalRequest, diff --git a/src/llama_stack_client/types/response_object_stream.py b/src/llama_stack_client/types/response_object_stream.py index dc3e3e79..bcc69265 100644 --- a/src/llama_stack_client/types/response_object_stream.py +++ b/src/llama_stack_client/types/response_object_stream.py @@ -42,6 +42,9 @@ "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpListTools", "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpListToolsTool", "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMcpApprovalRequest", + "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseReasoningItem", + "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseReasoningItemContent", + "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseReasoningItemSummary", "OpenAIResponseObjectStreamResponseOutputItemDone", "OpenAIResponseObjectStreamResponseOutputItemDoneItem", "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessage", @@ -67,6 +70,9 @@ "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpListTools", "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpListToolsTool", 
"OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMcpApprovalRequest", + "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseReasoningItem", + "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseReasoningItemContent", + "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseReasoningItemSummary", "OpenAIResponseObjectStreamResponseOutputTextDelta", "OpenAIResponseObjectStreamResponseOutputTextDeltaLogprob", "OpenAIResponseObjectStreamResponseOutputTextDeltaLogprobTopLogprob", @@ -491,12 +497,47 @@ class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMcpAppr type: Optional[Literal["mcp_approval_request"]] = None + +class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseReasoningItemContent(BaseModel): + """Reasoning text content from the model's chain-of-thought.""" + + text: str + + type: Optional[Literal["reasoning_text"]] = None + + +class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseReasoningItemSummary(BaseModel): + """Summary of the model's reasoning output.""" + + text: str + + type: Optional[Literal["summary_text"]] = None + + +class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseReasoningItem(BaseModel): + """Reasoning output item for OpenAI responses.""" + + id: str + + summary: List[OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseReasoningItemSummary] + + content: Optional[List[OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseReasoningItemContent]] = ( + None + ) + + encrypted_content: Optional[str] = None + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + + type: Optional[Literal["reasoning"]] = None + + OpenAIResponseObjectStreamResponseOutputItemAddedItem: TypeAlias = Annotated[ Union[ OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessage, OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageWebSearchToolCall,
OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageFileSearchToolCall, OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageFunctionToolCall, + OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseReasoningItem, OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpCall, OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpListTools, OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMcpApprovalRequest, @@ -860,12 +901,47 @@ class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMcpAppro type: Optional[Literal["mcp_approval_request"]] = None + +class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseReasoningItemContent(BaseModel): + """Reasoning text content from the model's chain-of-thought.""" + + text: str + + type: Optional[Literal["reasoning_text"]] = None + + +class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseReasoningItemSummary(BaseModel): + """Summary of the model's reasoning output.""" + + text: str + + type: Optional[Literal["summary_text"]] = None + + +class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseReasoningItem(BaseModel): + """Reasoning output item for OpenAI responses.""" + + id: str + + summary: List[OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseReasoningItemSummary] + + content: Optional[List[OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseReasoningItemContent]] = ( + None + ) + + encrypted_content: Optional[str] = None + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + + type: Optional[Literal["reasoning"]] = None + + OpenAIResponseObjectStreamResponseOutputItemDoneItem: TypeAlias = Annotated[ Union[ OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessage, OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageWebSearchToolCall,
OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageFileSearchToolCall, OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageFunctionToolCall, + OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseReasoningItem, OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpCall, OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpListTools, OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMcpApprovalRequest,