1 change: 1 addition & 0 deletions README.md
@@ -187,6 +187,7 @@ See also [the llm tag](https://simonwillison.net/tags/llm/) on my blog.
* [Trying out tools](https://llm.datasette.io/en/stable/tools.html#trying-out-tools)
* [LLM’s implementation of tools](https://llm.datasette.io/en/stable/tools.html#llm-s-implementation-of-tools)
* [Default tools](https://llm.datasette.io/en/stable/tools.html#default-tools)
* [Combining tools with schemas](https://llm.datasette.io/en/stable/tools.html#combining-tools-with-schemas)
* [Tips for implementing tools](https://llm.datasette.io/en/stable/tools.html#tips-for-implementing-tools)
* [Schemas](https://llm.datasette.io/en/stable/schemas.html)
* [Schemas tutorial](https://llm.datasette.io/en/stable/schemas.html#schemas-tutorial)
19 changes: 19 additions & 0 deletions docs/tools.md
@@ -89,6 +89,25 @@ Try them like this:
llm -T llm_version -T llm_time 'Give me the current time and LLM version' --td
```

(tools-with-schemas)=

## Combining tools with schemas

You can use tools and {ref}`schemas <schemas>` together.

```bash
llm --tool llm_time --schema "date: current date" "What is the time?" --td
```
Example output:
```
Tool call: llm_time({})
{"utc_time": "2025-02-28 14:30:00 UTC", ...}

{"date": "2025-02-28"}
```

The model first calls the `llm_time` tool to get the current time, then uses that information to produce a response that matches the schema.

(tools-tips)=

## Tips for implementing tools
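The documentation addition above covers the CLI flow. The same combination is available from the Python API, which the new tests in this PR exercise via `model.chain(..., tools=..., schema=...)`. A minimal sketch of that usage — the model ID and tool here are illustrative placeholders, not part of the PR:

```python
import llm


def get_dog() -> str:
    # A trivial tool the model can call before answering.
    return "Cleo is 10 years old"


dog_schema = {
    "type": "object",
    "properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
}

# chain() runs the prompt, executes any tool calls, then prompts again with
# the tool results. With this change the schema now applies to that
# follow-up prompt too, so the final answer conforms to the schema.
model = llm.get_model("gpt-4.1")  # hypothetical choice; any tools+schema model works
response = model.chain(
    "How old is Cleo the dog?",
    tools=[get_dog],
    schema=dog_schema,
)
print(response.text())  # e.g. {"name": "Cleo", "age": 10}
```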
2 changes: 2 additions & 0 deletions llm/models.py
@@ -1625,6 +1625,7 @@ def responses(self) -> Iterator[Response]:
tool_results=tool_results,
options=self.prompt.options,
attachments=attachments,
schema=current_response.prompt.schema,
),
self.model,
stream=self.stream,
@@ -1681,6 +1682,7 @@ async def responses(self) -> AsyncIterator[AsyncResponse]:
tool_results=tool_results,
options=self.prompt.options,
attachments=attachments,
schema=current_response.prompt.schema,
)
current_response = AsyncResponse(
prompt,
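For context on the two one-line changes above: when a chain executes tool calls, it constructs a follow-up prompt carrying the tool results, and previously that prompt dropped the schema, so only the first response in the chain was schema-constrained. A simplified, illustrative sketch of the loop — not the actual `llm` internals; `respond` and `execute_tools` are invented names:

```python
def run_chain(model, prompt):
    # Simplified sketch of a tool-calling chain loop.
    response = model.respond(prompt)  # hypothetical single-call helper
    while response.tool_calls():
        tool_results = execute_tools(response.tool_calls())  # hypothetical
        follow_up = Prompt(
            "",
            model=model,
            tool_results=tool_results,
            options=prompt.options,
            # The fix: carry the schema from the previous prompt forward so
            # the final (post-tool) response is still schema-constrained.
            schema=response.prompt.schema,
        )
        response = model.respond(follow_up)
    return response
```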
53 changes: 53 additions & 0 deletions tests/test_tools.py
@@ -520,6 +520,59 @@ def test_tool_errors(async_):
) in log_text_result.output


def test_schema_propagates_through_tool_chain():
"""Test that schema is propagated through tool chains."""
model = llm.get_model("echo")
model.supports_schema = True

def get_dog() -> str:
return "Cleo is 10 years old"

dog_schema = {
"type": "object",
"properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
}

chain_response = model.chain(
json.dumps({"tool_calls": [{"name": "get_dog"}]}),
tools=[get_dog],
schema=dog_schema,
)
_ = chain_response.text()

assert len(chain_response._responses) == 2
first, second = chain_response._responses
assert first.prompt.schema == dog_schema
assert second.prompt.schema == dog_schema


@pytest.mark.asyncio
async def test_schema_propagates_through_tool_chain_async():
"""Test schema propagation through tool chains for async models."""
model = llm.get_async_model("echo")
model.supports_schema = True

async def get_dog() -> str:
return "Cleo is 10 years old"

dog_schema = {
"type": "object",
"properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
}

chain_response = model.chain(
json.dumps({"tool_calls": [{"name": "get_dog"}]}),
tools=[get_dog],
schema=dog_schema,
)
_ = await chain_response.text()

assert len(chain_response._responses) == 2
first, second = chain_response._responses
assert first.prompt.schema == dog_schema
assert second.prompt.schema == dog_schema


def test_chain_sync_cancel_only_first_of_two():
model = llm.get_model("echo")
