diff --git a/pydantic_ai_slim/pydantic_ai/_output.py b/pydantic_ai_slim/pydantic_ai/_output.py index 7af96aa32..135feebad 100644 --- a/pydantic_ai_slim/pydantic_ai/_output.py +++ b/pydantic_ai_slim/pydantic_ai/_output.py @@ -28,7 +28,7 @@ ToolOutput, _OutputSpecItem, # type: ignore[reportPrivateUsage] ) -from .tools import GenerateToolJsonSchema, ObjectJsonSchema, ToolDefinition +from .tools import FunctionTextFormat, GenerateToolJsonSchema, ObjectJsonSchema, ToolDefinition from .toolsets.abstract import AbstractToolset, ToolsetTool if TYPE_CHECKING: @@ -591,6 +591,7 @@ class OutputObjectDefinition: name: str | None = None description: str | None = None strict: bool | None = None + text_format: Literal['text'] | FunctionTextFormat | None = None @dataclass(init=False) @@ -621,6 +622,7 @@ def __init__( name: str | None = None, description: str | None = None, strict: bool | None = None, + text_format: Literal['text'] | FunctionTextFormat | None = None, ): if inspect.isfunction(output) or inspect.ismethod(output): self._function_schema = _function_schema.function_schema(output, GenerateToolJsonSchema) @@ -663,6 +665,7 @@ def __init__( description=description, json_schema=json_schema, strict=strict, + text_format=text_format, ) async def process( @@ -920,11 +923,13 @@ def build( name = None description = None strict = None + text_format = None if isinstance(output, ToolOutput): # do we need to error on conflicts here? 
(DavidM): If this is internal maybe doesn't matter, if public, use overloads name = output.name description = output.description strict = output.strict + text_format = output.text_format output = output.output @@ -932,7 +937,9 @@ def build( if strict is None: strict = default_strict - processor = ObjectOutputProcessor(output=output, description=description, strict=strict) + processor = ObjectOutputProcessor( + output=output, description=description, strict=strict, text_format=text_format + ) object_def = processor.object_def if name is None: @@ -957,6 +964,7 @@ def build( description=description, parameters_json_schema=object_def.json_schema, strict=object_def.strict, + text_format=object_def.text_format, outer_typed_dict_key=processor.outer_typed_dict_key, kind='output', ) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 1eab46d7e..a4c1d928e 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -8,7 +8,7 @@ from collections.abc import AsyncIterator, Awaitable, Iterator, Sequence from contextlib import AbstractAsyncContextManager, AsyncExitStack, asynccontextmanager, contextmanager from contextvars import ContextVar -from typing import TYPE_CHECKING, Any, Callable, ClassVar, cast, overload +from typing import TYPE_CHECKING, Any, Callable, ClassVar, Literal, cast, overload from opentelemetry.trace import NoOpTracer, use_span from pydantic.json_schema import GenerateJsonSchema @@ -39,6 +39,7 @@ from ..tools import ( AgentDepsT, DocstringFormat, + FunctionTextFormat, GenerateToolJsonSchema, RunContext, Tool, @@ -963,6 +964,7 @@ def tool( require_parameter_descriptions: bool = False, schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema, strict: bool | None = None, + text_format: Literal['text'] | FunctionTextFormat | None = None, ) -> Callable[[ToolFuncContext[AgentDepsT, ToolParams]], ToolFuncContext[AgentDepsT, ToolParams]]: 
... def tool( @@ -977,6 +979,7 @@ def tool( require_parameter_descriptions: bool = False, schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema, strict: bool | None = None, + text_format: Literal['text'] | FunctionTextFormat | None = None, ) -> Any: """Decorator to register a tool function which takes [`RunContext`][pydantic_ai.tools.RunContext] as its first argument. @@ -1021,6 +1024,8 @@ async def spam(ctx: RunContext[str], y: float) -> float: schema_generator: The JSON schema generator class to use for this tool. Defaults to `GenerateToolJsonSchema`. strict: Whether to enforce JSON schema compliance (only affects OpenAI). See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. + text_format: Used to invoke the function using free-form function calling (only affects OpenAI). + See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. """ def tool_decorator( @@ -1037,6 +1042,7 @@ def tool_decorator( require_parameter_descriptions, schema_generator, strict, + text_format, ) return func_ @@ -1057,6 +1063,7 @@ def tool_plain( require_parameter_descriptions: bool = False, schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema, strict: bool | None = None, + text_format: Literal['text'] | FunctionTextFormat | None = None, ) -> Callable[[ToolFuncPlain[ToolParams]], ToolFuncPlain[ToolParams]]: ... def tool_plain( @@ -1071,6 +1078,7 @@ def tool_plain( require_parameter_descriptions: bool = False, schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema, strict: bool | None = None, + text_format: Literal['text'] | FunctionTextFormat | None = None, ) -> Any: """Decorator to register a tool function which DOES NOT take `RunContext` as an argument. @@ -1115,6 +1123,8 @@ async def spam(ctx: RunContext[str]) -> float: schema_generator: The JSON schema generator class to use for this tool. Defaults to `GenerateToolJsonSchema`. strict: Whether to enforce JSON schema compliance (only affects OpenAI). 
See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. + text_format: Used to invoke the function using free-form function calling (only affects OpenAI). + See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. """ def tool_decorator(func_: ToolFuncPlain[ToolParams]) -> ToolFuncPlain[ToolParams]: @@ -1129,6 +1139,7 @@ def tool_decorator(func_: ToolFuncPlain[ToolParams]) -> ToolFuncPlain[ToolParams require_parameter_descriptions, schema_generator, strict, + text_format, ) return func_ diff --git a/pydantic_ai_slim/pydantic_ai/models/openai.py b/pydantic_ai_slim/pydantic_ai/models/openai.py index 736c894a6..eaa7caecd 100644 --- a/pydantic_ai_slim/pydantic_ai/models/openai.py +++ b/pydantic_ai_slim/pydantic_ai/models/openai.py @@ -74,6 +74,7 @@ from openai.types.responses.response_input_param import FunctionCallOutput, Message from openai.types.shared import ReasoningEffort from openai.types.shared_params import Reasoning + from openai.types.shared_params.custom_tool_input_format import CustomToolInputFormat except ImportError as _import_error: raise ImportError( 'Please install `openai` to use the OpenAI model, ' @@ -762,7 +763,7 @@ async def request( response = await self._responses_create( messages, False, cast(OpenAIResponsesModelSettings, model_settings or {}), model_request_parameters ) - return self._process_response(response) + return self._process_response(response, model_request_parameters) @asynccontextmanager async def request_stream( @@ -779,7 +780,11 @@ async def request_stream( async with response: yield await self._process_streamed_response(response, model_request_parameters) - def _process_response(self, response: responses.Response) -> ModelResponse: + def _process_response( + self, + response: responses.Response, + model_request_parameters: ModelRequestParameters, + ) -> ModelResponse: """Process a non-streamed response, and prepare a message to return.""" timestamp = number_to_datetime(response.created_at) items: 
list[ModelResponsePart] = [] @@ -795,6 +800,16 @@ def _process_response(self, response: responses.Response) -> ModelResponse: items.append(TextPart(content.text)) elif item.type == 'function_call': items.append(ToolCallPart(item.name, item.arguments, tool_call_id=item.call_id)) + elif item.type == 'custom_tool_call': + if item.name not in model_request_parameters.tool_defs: + raise UnexpectedModelBehavior(f'Unknown tool called: {item.name}') + tool = model_request_parameters.tool_defs[item.name] + argument_name = tool.single_string_argument_name + if argument_name is None: + raise UnexpectedModelBehavior( + f'Custom tool call made to function {item.name} which has unexpected arguments' + ) + items.append(ToolCallPart(item.name, {argument_name: item.input}, tool_call_id=item.call_id)) return ModelResponse( items, usage=_map_usage(response), @@ -893,11 +908,14 @@ async def _responses_create( try: extra_headers = model_settings.get('extra_headers', {}) extra_headers.setdefault('User-Agent', get_user_agent()) + parallel_tool_calls = self._get_parallel_tool_calling( + model_settings=model_settings, model_request_parameters=model_request_parameters + ) return await self.client.responses.create( input=openai_messages, model=self._model_name, instructions=instructions, - parallel_tool_calls=model_settings.get('parallel_tool_calls', NOT_GIVEN), + parallel_tool_calls=parallel_tool_calls, tools=tools or NOT_GIVEN, tool_choice=tool_choice or NOT_GIVEN, max_output_tokens=model_settings.get('max_tokens', NOT_GIVEN), @@ -937,7 +955,18 @@ def _get_reasoning(self, model_settings: OpenAIResponsesModelSettings) -> Reason return NOT_GIVEN return Reasoning(effort=reasoning_effort, summary=reasoning_summary) - def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[responses.FunctionToolParam]: + def _get_parallel_tool_calling( + self, model_settings: OpenAIResponsesModelSettings, model_request_parameters: ModelRequestParameters + ) -> bool | NotGiven: + if 
any(tool_definition.text_format for tool_definition in model_request_parameters.tool_defs.values()): + return False + if any(tool_definition.text_format for tool_definition in model_request_parameters.output_tools): + return False + return model_settings.get('parallel_tool_calls', NOT_GIVEN) + + def _get_tools( + self, model_request_parameters: ModelRequestParameters + ) -> list[responses.FunctionToolParam | responses.CustomToolParam]: + return [self._map_tool_definition(r) for r in model_request_parameters.tool_defs.values()] def _get_builtin_tools(self, model_request_parameters: ModelRequestParameters) -> list[responses.ToolParam]: @@ -960,15 +989,35 @@ def _get_builtin_tools(self, model_request_parameters: ModelRequestParameters) - ) return tools - def _map_tool_definition(self, f: ToolDefinition) -> responses.FunctionToolParam: + def _map_tool_definition(self, f: ToolDefinition) -> responses.FunctionToolParam | responses.CustomToolParam: + model_profile = OpenAIModelProfile.from_profile(self.profile) + if f.text_format: + if not model_profile.openai_supports_freeform_function_calling: + raise UserError( + f'`{f.name}` uses free-form function calling but {self._model_name} does not support free-form function calling.' + ) + if not f.only_takes_string_argument: + raise UserError( + f'`{f.name}` is set as a free-form function but does not take a single string argument.'
+ ) + if f.text_format == 'text': + format: CustomToolInputFormat = {'type': 'text'} + else: + format = {'type': 'grammar', 'syntax': f.text_format.syntax, 'definition': f.text_format.grammar} + tool_param: responses.CustomToolParam = { + 'name': f.name, + 'type': 'custom', + 'description': f.description or '', + 'format': format, + } + return tool_param + return { 'name': f.name, 'parameters': f.parameters_json_schema, 'type': 'function', 'description': f.description, - 'strict': bool( - f.strict and OpenAIModelProfile.from_profile(self.profile).openai_supports_strict_tool_definition - ), + 'strict': bool(f.strict and model_profile.openai_supports_strict_tool_definition), } async def _map_messages( diff --git a/pydantic_ai_slim/pydantic_ai/output.py b/pydantic_ai_slim/pydantic_ai/output.py index d61eb6174..722c6cf5c 100644 --- a/pydantic_ai_slim/pydantic_ai/output.py +++ b/pydantic_ai_slim/pydantic_ai/output.py @@ -11,7 +11,7 @@ from . import _utils from .messages import ToolCallPart -from .tools import RunContext, ToolDefinition +from .tools import FunctionTextFormat, RunContext, ToolDefinition __all__ = ( # classes @@ -112,6 +112,8 @@ class Vehicle(BaseModel): """The maximum number of retries for the tool.""" strict: bool | None """Whether to use strict mode for the tool.""" + text_format: Literal['text'] | FunctionTextFormat | None = None + """Whether to invoke the function with free-form function calling for tool calls.""" def __init__( self, @@ -121,12 +123,14 @@ def __init__( description: str | None = None, max_retries: int | None = None, strict: bool | None = None, + text_format: Literal['text'] | FunctionTextFormat | None = None, ): self.output = type_ self.name = name self.description = description self.max_retries = max_retries self.strict = strict + self.text_format = text_format @dataclass(init=False) diff --git a/pydantic_ai_slim/pydantic_ai/profiles/openai.py b/pydantic_ai_slim/pydantic_ai/profiles/openai.py index 60d782603..210462106 100644 --- 
a/pydantic_ai_slim/pydantic_ai/profiles/openai.py +++ b/pydantic_ai_slim/pydantic_ai/profiles/openai.py @@ -33,10 +33,16 @@ class OpenAIModelProfile(ModelProfile): openai_system_prompt_role: OpenAISystemPromptRole | None = None """The role to use for the system prompt message. If not provided, defaults to `'system'`.""" + # GPT-5 introduced support for directly calling a function with a string. + openai_supports_freeform_function_calling: bool = False + """Whether the provider accepts the value ``type='custom'`` for tools in the + request payload.""" + def openai_model_profile(model_name: str) -> ModelProfile: """Get the model profile for an OpenAI model.""" is_reasoning_model = model_name.startswith('o') or model_name.startswith('gpt-5') + is_freeform_function_calling_model = model_name.startswith('gpt-5') # Structured Outputs (output mode 'native') is only supported with the gpt-4o-mini, gpt-4o-mini-2024-07-18, and gpt-4o-2024-08-06 model snapshots and later. # We leave it in here for all models because the `default_structured_output_mode` is `'tool'`, so `native` is only used # when the user specifically uses the `NativeOutput` marker, so an error from the API is acceptable. 
@@ -50,6 +56,7 @@ def openai_model_profile(model_name: str) -> ModelProfile: supports_json_schema_output=True, supports_json_object_output=True, openai_supports_sampling_settings=not is_reasoning_model, + openai_supports_freeform_function_calling=is_freeform_function_calling_model, openai_system_prompt_role=openai_system_prompt_role, ) diff --git a/pydantic_ai_slim/pydantic_ai/tools.py b/pydantic_ai_slim/pydantic_ai/tools.py index 9d9d7ac11..7f6e0e6e7 100644 --- a/pydantic_ai_slim/pydantic_ai/tools.py +++ b/pydantic_ai_slim/pydantic_ai/tools.py @@ -1,8 +1,10 @@ from __future__ import annotations as _annotations +import re from collections.abc import Awaitable, Sequence from dataclasses import dataclass, field from typing import Any, Callable, Generic, Literal, Union +from warnings import warn from pydantic.json_schema import GenerateJsonSchema, JsonSchemaValue from pydantic_core import SchemaValidator, core_schema @@ -130,6 +132,55 @@ async def turn_on_strict_if_openai( A = TypeVar('A') +@dataclass +class FunctionTextFormat: + """Used to invoke the function with free-form function calling for tool calls. + + This class encapsulates the settings related to free-form function calling + as well as constraining the function call argument to a specific grammar. + The function must take a single string argument. + + Calling a function in this way prevents parallel tool calling. + + Note: this is currently only supported by OpenAI gpt-5 models. + """ + + syntax: Literal['lark', 'regex'] + """The syntax type for the grammar to constrain the free-form function call. + + For 'lark' the grammar attribute contains the lark grammar that the text must + conform to. + For 'regex' the grammar attribute contains the regex pattern that the text must + conform to. + """ + grammar: str + """The grammar to constrain the free-form function call. + + When the syntax is 'lark' this attribute contains the lark grammar that the text must + conform to. 
+ When the syntax is 'regex' this attribute contains the regex pattern that the text must + conform to. + """ + + def __post_init__(self) -> None: + if self.syntax == 'lark': + try: + import lark + from lark.exceptions import GrammarError + + try: + lark.Lark(self.grammar) + except GrammarError as e: + raise ValueError('Lark grammar is invalid') from e + except ImportError: + warn('Cannot validate lark grammar as the lark optional dependency group has not been installed') + elif self.syntax == 'regex': + try: + re.compile(self.grammar) + except re.error as e: + raise ValueError('Regex is invalid') from e + + class GenerateToolJsonSchema(GenerateJsonSchema): def typed_dict_schema(self, schema: core_schema.TypedDictSchema) -> JsonSchemaValue: json_schema = super().typed_dict_schema(schema) @@ -167,6 +218,7 @@ class Tool(Generic[AgentDepsT]): docstring_format: DocstringFormat require_parameter_descriptions: bool strict: bool | None + text_format: Literal['text'] | FunctionTextFormat | None function_schema: _function_schema.FunctionSchema """ The base JSON schema for the tool's parameters. @@ -187,6 +239,7 @@ def __init__( require_parameter_descriptions: bool = False, schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema, strict: bool | None = None, + text_format: Literal['text'] | FunctionTextFormat | None = None, function_schema: _function_schema.FunctionSchema | None = None, ): """Create a new tool instance. @@ -240,6 +293,8 @@ async def prep_my_tool( schema_generator: The JSON schema generator class to use. Defaults to `GenerateToolJsonSchema`. strict: Whether to enforce JSON schema compliance (only affects OpenAI). See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. + text_format: Used to invoke the function using free-form function calling (only affects OpenAI). + See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. function_schema: The function schema to use for the tool. If not provided, it will be generated. 
""" self.function = function @@ -258,6 +313,7 @@ async def prep_my_tool( self.docstring_format = docstring_format self.require_parameter_descriptions = require_parameter_descriptions self.strict = strict + self.text_format = text_format @classmethod def from_schema( @@ -305,6 +361,7 @@ def tool_def(self): description=self.description, parameters_json_schema=self.function_schema.json_schema, strict=self.strict, + text_format=self.text_format, ) async def prepare_tool_def(self, ctx: RunContext[AgentDepsT]) -> ToolDefinition | None: @@ -369,6 +426,18 @@ class ToolDefinition: Note: this is currently only supported by OpenAI models. """ + text_format: Literal['text'] | FunctionTextFormat | None = None + """Whether to invoke the function with free-form function calling for tool calls. + + Setting this to a format while using a supported model prevents parallel tool calling + in exchange for passing raw text payloads to your custom tool without wrapping the data in JSON. + The function must take a single string argument. + + When `None` (the default), the model invokes the tool in the normal way and parallel tool calls are possible. + + Note: this is currently only supported by OpenAI gpt-5 models. + """ + kind: ToolKind = field(default='function') """The kind of tool: @@ -378,4 +447,26 @@ class ToolDefinition: When the model calls a deferred tool, the agent run ends with a `DeferredToolCalls` object and a new run is expected to be started at a later point with the message history and new `ToolReturnPart`s corresponding to each deferred call. 
""" + @property + def only_takes_string_argument(self) -> bool: + # true if the parameters_json_schema looks like: + # {"additionalProperties": False, "properties": {NAME: {"type": "string"}}, "required": ["NAME"], "type": "object"} + return self.single_string_argument_name is not None + + @property + def single_string_argument_name(self) -> str | None: + # returns the name of the single argument that is a string + # used for free-form function calling + # will return None if there is more or less than one argument, + # or if the argument is not a string + schema = self.parameters_json_schema + if len(schema['required']) != 1: + return None + if len(schema['properties']) != 1: + return None + property_name: str = schema['required'][0] + if not schema['properties'][property_name].get('type', None) == 'string': + return None + return property_name + __repr__ = _utils.dataclasses_no_defaults_repr diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/function.py b/pydantic_ai_slim/pydantic_ai/toolsets/function.py index 87ea9b1e8..7318df270 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/function.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/function.py @@ -2,7 +2,7 @@ from collections.abc import Awaitable, Sequence from dataclasses import dataclass, replace -from typing import Any, Callable, overload +from typing import Any, Callable, Literal, overload from pydantic.json_schema import GenerateJsonSchema @@ -10,6 +10,7 @@ from ..exceptions import UserError from ..tools import ( DocstringFormat, + FunctionTextFormat, GenerateToolJsonSchema, Tool, ToolFuncEither, @@ -80,6 +81,7 @@ def tool( require_parameter_descriptions: bool = False, schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema, strict: bool | None = None, + text_format: Literal['text'] | FunctionTextFormat | None = None, ) -> Callable[[ToolFuncEither[AgentDepsT, ToolParams]], ToolFuncEither[AgentDepsT, ToolParams]]: ... 
def tool( @@ -94,6 +96,7 @@ def tool( require_parameter_descriptions: bool = False, schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema, strict: bool | None = None, + text_format: Literal['text'] | FunctionTextFormat | None = None, ) -> Any: """Decorator to register a tool function which takes [`RunContext`][pydantic_ai.tools.RunContext] as its first argument. @@ -140,6 +143,8 @@ async def spam(ctx: RunContext[str], y: float) -> float: schema_generator: The JSON schema generator class to use for this tool. Defaults to `GenerateToolJsonSchema`. strict: Whether to enforce JSON schema compliance (only affects OpenAI). See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. + text_format: Used to invoke the function using free-form function calling (only affects OpenAI). + See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. """ def tool_decorator( @@ -156,6 +161,7 @@ def tool_decorator( require_parameter_descriptions, schema_generator, strict, + text_format, ) return func_ @@ -172,6 +178,7 @@ def add_function( require_parameter_descriptions: bool = False, schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema, strict: bool | None = None, + text_format: Literal['text'] | FunctionTextFormat | None = None, ) -> None: """Add a function as a tool to the toolset. @@ -195,6 +202,8 @@ def add_function( schema_generator: The JSON schema generator class to use for this tool. Defaults to `GenerateToolJsonSchema`. strict: Whether to enforce JSON schema compliance (only affects OpenAI). See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. + text_format: Used to invoke the function using free-form function calling (only affects OpenAI). + See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. 
""" tool = Tool[AgentDepsT]( func, @@ -206,6 +215,7 @@ def add_function( require_parameter_descriptions=require_parameter_descriptions, schema_generator=schema_generator, strict=strict, + text_format=text_format, ) self.add_tool(tool) diff --git a/pydantic_ai_slim/pyproject.toml b/pydantic_ai_slim/pyproject.toml index a91afca75..aed611eaa 100644 --- a/pydantic_ai_slim/pyproject.toml +++ b/pydantic_ai_slim/pyproject.toml @@ -95,6 +95,8 @@ ag-ui = ["ag-ui-protocol>=0.1.8", "starlette>=0.45.3"] retries = ["tenacity>=8.2.3"] # Temporal temporal = ["temporalio>=1.15.0"] +# free form function callins with lark context free grammar +lark = ["lark>=1.2.2"] [tool.hatch.metadata] allow-direct-references = true diff --git a/tests/test_logfire.py b/tests/test_logfire.py index 2bb02f45c..cc06923f9 100644 --- a/tests/test_logfire.py +++ b/tests/test_logfire.py @@ -295,6 +295,7 @@ async def my_ret(x: int) -> str: }, 'outer_typed_dict_key': None, 'strict': None, + 'text_format': None, 'kind': 'function', } ], diff --git a/tests/test_tools.py b/tests/test_tools.py index 8e1c3361b..4ea7b39ab 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -142,6 +142,7 @@ def test_docstring_google(docstring_format: Literal['google', 'auto']): }, 'outer_typed_dict_key': None, 'strict': None, + 'text_format': None, 'kind': 'function', } ) @@ -174,6 +175,7 @@ def test_docstring_sphinx(docstring_format: Literal['sphinx', 'auto']): }, 'outer_typed_dict_key': None, 'strict': None, + 'text_format': None, 'kind': 'function', } ) @@ -214,6 +216,7 @@ def test_docstring_numpy(docstring_format: Literal['numpy', 'auto']): }, 'outer_typed_dict_key': None, 'strict': None, + 'text_format': None, 'kind': 'function', } ) @@ -254,6 +257,7 @@ def my_tool(x: int) -> str: # pragma: no cover }, 'outer_typed_dict_key': None, 'strict': None, + 'text_format': None, 'kind': 'function', } ) @@ -292,6 +296,7 @@ def my_tool(x: int) -> str: # pragma: no cover }, 'outer_typed_dict_key': None, 'strict': None, + 
'text_format': None, 'kind': 'function', } ) @@ -336,6 +341,7 @@ def my_tool(x: int) -> str: # pragma: no cover }, 'outer_typed_dict_key': None, 'strict': None, + 'text_format': None, 'kind': 'function', } ) @@ -368,6 +374,7 @@ def test_only_returns_type(): 'parameters_json_schema': {'additionalProperties': False, 'properties': {}, 'type': 'object'}, 'outer_typed_dict_key': None, 'strict': None, + 'text_format': None, 'kind': 'function', } ) @@ -391,6 +398,7 @@ def test_docstring_unknown(): 'parameters_json_schema': {'additionalProperties': {'type': 'integer'}, 'properties': {}, 'type': 'object'}, 'outer_typed_dict_key': None, 'strict': None, + 'text_format': None, 'kind': 'function', } ) @@ -432,6 +440,7 @@ def test_docstring_google_no_body(docstring_format: Literal['google', 'auto']): }, 'outer_typed_dict_key': None, 'strict': None, + 'text_format': None, 'kind': 'function', } ) @@ -466,6 +475,7 @@ def takes_just_model(model: Foo) -> str: }, 'outer_typed_dict_key': None, 'strict': None, + 'text_format': None, 'kind': 'function', } ) @@ -509,6 +519,7 @@ def takes_just_model(model: Foo, z: int) -> str: }, 'outer_typed_dict_key': None, 'strict': None, + 'text_format': None, 'kind': 'function', } ) @@ -872,6 +883,7 @@ def test_suppress_griffe_logging(caplog: LogCaptureFixture): 'outer_typed_dict_key': None, 'parameters_json_schema': {'additionalProperties': False, 'properties': {}, 'type': 'object'}, 'strict': None, + 'text_format': None, 'kind': 'function', } ) @@ -942,6 +954,7 @@ def my_tool_plain(*, a: int = 1, b: int) -> int: 'type': 'object', }, 'strict': None, + 'text_format': None, 'kind': 'function', }, { @@ -955,6 +968,7 @@ def my_tool_plain(*, a: int = 1, b: int) -> int: 'type': 'object', }, 'strict': None, + 'text_format': None, 'kind': 'function', }, ] @@ -1041,6 +1055,7 @@ def my_tool(x: Annotated[Union[str, None], WithJsonSchema({'type': 'string'})] = 'type': 'object', }, 'strict': None, + 'text_format': None, 'kind': 'function', }, { @@ -1052,6 +1067,7 
@@ def my_tool(x: Annotated[Union[str, None], WithJsonSchema({'type': 'string'})] = 'type': 'object', }, 'strict': None, + 'text_format': None, 'kind': 'function', }, ] @@ -1087,6 +1103,7 @@ def get_score(data: Data) -> int: ... # pragma: no branch }, 'outer_typed_dict_key': None, 'strict': None, + 'text_format': None, 'kind': 'function', } ) diff --git a/uv.lock b/uv.lock index 69043ee20..3a975e31a 100644 --- a/uv.lock +++ b/uv.lock @@ -1745,6 +1745,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/01/0e/b27cdbaccf30b890c40ed1da9fd4a3593a5cf94dae54fb34f8a4b74fcd3f/jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af", size = 18437, upload-time = "2025-04-23T12:34:05.422Z" }, ] +[[package]] +name = "lark" +version = "1.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/af/60/bc7622aefb2aee1c0b4ba23c1446d3e30225c8770b38d7aedbfb65ca9d5a/lark-1.2.2.tar.gz", hash = "sha256:ca807d0162cd16cef15a8feecb862d7319e7a09bdb13aef927968e45040fed80", size = 252132, upload-time = "2024-08-13T19:49:00.652Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2d/00/d90b10b962b4277f5e64a78b6609968859ff86889f5b898c1a778c06ec00/lark-1.2.2-py3-none-any.whl", hash = "sha256:c2276486b02f0f1b90be155f2c8ba4a8e194d42775786db622faccd652d8e80c", size = 111036, upload-time = "2024-08-13T19:48:58.603Z" }, +] + [[package]] name = "logfire" version = "4.0.0" @@ -3513,6 +3522,9 @@ groq = [ huggingface = [ { name = "huggingface-hub", extra = ["inference"] }, ] +lark = [ + { name = "lark" }, +] logfire = [ { name = "logfire" }, ] @@ -3557,6 +3569,7 @@ requires-dist = [ { name = "groq", marker = "extra == 'groq'", specifier = ">=0.25.0" }, { name = "httpx", specifier = ">=0.27" }, { name = "huggingface-hub", extras = ["inference"], marker = "extra == 'huggingface'", specifier = ">=0.33.5" }, + { name = "lark", marker = "extra == 
'lark'", specifier = ">=1.2.2" }, { name = "logfire", marker = "extra == 'logfire'", specifier = ">=3.14.1" }, { name = "mcp", marker = "python_full_version >= '3.10' and extra == 'mcp'", specifier = ">=1.10.0" }, { name = "mistralai", marker = "extra == 'mistral'", specifier = ">=1.9.2" }, @@ -3575,7 +3588,7 @@ requires-dist = [ { name = "tenacity", marker = "extra == 'retries'", specifier = ">=8.2.3" }, { name = "typing-inspection", specifier = ">=0.4.0" }, ] -provides-extras = ["a2a", "ag-ui", "anthropic", "bedrock", "cli", "cohere", "duckduckgo", "evals", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "retries", "tavily", "temporal", "vertexai"] +provides-extras = ["a2a", "ag-ui", "anthropic", "bedrock", "cli", "cohere", "duckduckgo", "evals", "google", "groq", "huggingface", "lark", "logfire", "mcp", "mistral", "openai", "retries", "tavily", "temporal", "vertexai"] [[package]] name = "pydantic-core"