diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 2b49116b..693d080d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -61,14 +61,18 @@ jobs:
run: rye build
- name: Get GitHub OIDC Token
- if: github.repository == 'stainless-sdks/letta-sdk-python'
+ if: |-
+ github.repository == 'stainless-sdks/letta-sdk-python' &&
+ !startsWith(github.ref, 'refs/heads/stl/')
id: github-oidc
uses: actions/github-script@v8
with:
script: core.setOutput('github_token', await core.getIDToken());
- name: Upload tarball
- if: github.repository == 'stainless-sdks/letta-sdk-python'
+ if: |-
+ github.repository == 'stainless-sdks/letta-sdk-python' &&
+ !startsWith(github.ref, 'refs/heads/stl/')
env:
URL: https://pkg.stainless.com/s
AUTH: ${{ steps.github-oidc.outputs.github_token }}
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 4df928b0..577aef52 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "1.7.11"
+ ".": "1.7.12"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 5986e859..6a38ffdc 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 123
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/letta-ai%2Fletta-sdk-641193c8d6dd0ca97bc8f6ffc12534764d4cd9dda8fc5ace158397cb74b7787b.yml
-openapi_spec_hash: e0eef8afab116c4dba3540ab6fd26a8f
-config_hash: 98feceec44cc57270fce259ddbbec999
+configured_endpoints: 125
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/letta-ai%2Fletta-sdk-1c44d73b9152645e7b44512a238467b88c993a2c0151d5911b7f98d05583790e.yml
+openapi_spec_hash: e0633c52cd8d694e72211de731d1354a
+config_hash: 2dd2cc848568d7bec38b1cc774a5900c
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 272bafd1..75c0fbb2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,24 @@
# Changelog
+## 1.7.12 (2026-03-09)
+
+Full Changelog: [v1.7.11...v1.7.12](https://github.com/letta-ai/letta-python/compare/v1.7.11...v1.7.12)
+
+### Features
+
+* add conversation recompile endpoint ([65f0013](https://github.com/letta-ai/letta-python/commit/65f0013b623c195ec21ef627f0f2b68cce0be6ea))
+* add fireworks provider ([8b536c2](https://github.com/letta-ai/letta-python/commit/8b536c2d3709d421a493481b357e8aa254aa0779))
+* add recompile system endpoint to Stainless SDK ([40d9492](https://github.com/letta-ai/letta-python/commit/40d9492580644f1a9b922a0b510c5924a8d01482))
+* **core:** restructure memFS system prompt rendering + add client_skills [LET-7760] ([cc0756b](https://github.com/letta-ai/letta-python/commit/cc0756bfeea232f7d0b94b49e7fe8aa247ed9875))
+* **helm:** M3 multi-signal HPA external metrics + behavior (dev shadow, prod disabled) ([56f66ce](https://github.com/letta-ai/letta-python/commit/56f66cee54e0dfd0cb56156abe7744ee92434336))
+* **readiness:** M2 readiness enforcement scaffold (default OFF) ([5568b66](https://github.com/letta-ai/letta-python/commit/5568b66a784f913408125548fea2b2d9f8445e69))
+
+
+### Chores
+
+* **ci:** skip uploading artifacts on stainless-internal branches ([c179828](https://github.com/letta-ai/letta-python/commit/c17982832afdfcf0d87f81d151f1f33bb9891dde))
+* update placeholder string ([7142275](https://github.com/letta-ai/letta-python/commit/71422757db6f3f9cd572ec32c27217f5229e1cee))
+
## 1.7.11 (2026-03-04)
Full Changelog: [v1.7.10...v1.7.11](https://github.com/letta-ai/letta-python/compare/v1.7.10...v1.7.11)
diff --git a/api.md b/api.md
index e42c37ad..08dd942c 100644
--- a/api.md
+++ b/api.md
@@ -45,6 +45,7 @@ from letta_client.types import (
XaiModelSettings,
AgentExportFileResponse,
AgentImportFileResponse,
+ AgentRecompileResponse,
)
```
@@ -57,6 +58,7 @@ Methods:
- client.agents.delete(agent_id) -> object
- client.agents.export_file(agent_id, \*\*params) -> str
- client.agents.import_file(\*\*params) -> AgentImportFileResponse
+- client.agents.recompile(agent_id, \*\*params) -> str
## Messages
@@ -619,6 +621,7 @@ from letta_client.types import (
UpdateConversation,
ConversationListResponse,
ConversationCancelResponse,
+ ConversationRecompileResponse,
)
```
@@ -630,6 +633,7 @@ Methods:
- client.conversations.list(\*\*params) -> ConversationListResponse
- client.conversations.delete(conversation_id) -> object
- client.conversations.cancel(conversation_id, \*\*params) -> ConversationCancelResponse
+- client.conversations.recompile(conversation_id, \*\*params) -> str
## Messages
diff --git a/pyproject.toml b/pyproject.toml
index 1a4eec52..385a292e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "letta-client"
-version = "1.7.11"
+version = "1.7.12"
description = "The official Python library for the letta API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/letta_client/_client.py b/src/letta_client/_client.py
index 483c4659..07aeb3c6 100644
--- a/src/letta_client/_client.py
+++ b/src/letta_client/_client.py
@@ -367,7 +367,7 @@ def health(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> HealthResponse:
- """Async health check endpoint."""
+ """Liveness endpoint; returns 200 when process is responsive."""
return self.get(
"/v1/health/",
options=make_request_options(
@@ -684,7 +684,7 @@ async def health(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> HealthResponse:
- """Async health check endpoint."""
+ """Liveness endpoint; returns 200 when process is responsive."""
return await self.get(
"/v1/health/",
options=make_request_options(
diff --git a/src/letta_client/_version.py b/src/letta_client/_version.py
index 8954d9cc..e80b6469 100644
--- a/src/letta_client/_version.py
+++ b/src/letta_client/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "letta_client"
-__version__ = "1.7.11" # x-release-please-version
+__version__ = "1.7.12" # x-release-please-version
diff --git a/src/letta_client/resources/agents/agents.py b/src/letta_client/resources/agents/agents.py
index ba8f14f8..4633616c 100644
--- a/src/letta_client/resources/agents/agents.py
+++ b/src/letta_client/resources/agents/agents.py
@@ -39,6 +39,7 @@
agent_create_params,
agent_update_params,
agent_retrieve_params,
+ agent_recompile_params,
agent_export_file_params,
agent_import_file_params,
)
@@ -1038,6 +1039,58 @@ def import_file(
cast_to=AgentImportFileResponse,
)
+ def recompile(
+ self,
+ agent_id: str,
+ *,
+ dry_run: bool | Omit = omit,
+ update_timestamp: bool | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> str:
+ """
+ Manually trigger system prompt recompilation for an agent.
+
+ Args:
+ agent_id: The ID of the agent in the format 'agent-'
+
+ dry_run: If True, do not persist changes; still returns the compiled system prompt.
+
+ update_timestamp: If True, update the in-context memory last edit timestamp embedded in the system
+ prompt.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_id:
+ raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
+ return self._post(
+ f"/v1/agents/{agent_id}/recompile",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "dry_run": dry_run,
+ "update_timestamp": update_timestamp,
+ },
+ agent_recompile_params.AgentRecompileParams,
+ ),
+ ),
+ cast_to=str,
+ )
+
class AsyncAgentsResource(AsyncAPIResource):
@cached_property
@@ -1953,6 +2006,58 @@ async def import_file(
cast_to=AgentImportFileResponse,
)
+ async def recompile(
+ self,
+ agent_id: str,
+ *,
+ dry_run: bool | Omit = omit,
+ update_timestamp: bool | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> str:
+ """
+ Manually trigger system prompt recompilation for an agent.
+
+ Args:
+ agent_id: The ID of the agent in the format 'agent-'
+
+ dry_run: If True, do not persist changes; still returns the compiled system prompt.
+
+ update_timestamp: If True, update the in-context memory last edit timestamp embedded in the system
+ prompt.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_id:
+ raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
+ return await self._post(
+ f"/v1/agents/{agent_id}/recompile",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "dry_run": dry_run,
+ "update_timestamp": update_timestamp,
+ },
+ agent_recompile_params.AgentRecompileParams,
+ ),
+ ),
+ cast_to=str,
+ )
+
class AgentsResourceWithRawResponse:
def __init__(self, agents: AgentsResource) -> None:
@@ -1979,6 +2084,9 @@ def __init__(self, agents: AgentsResource) -> None:
self.import_file = to_raw_response_wrapper(
agents.import_file,
)
+ self.recompile = to_raw_response_wrapper(
+ agents.recompile,
+ )
@cached_property
def messages(self) -> MessagesResourceWithRawResponse:
@@ -2042,6 +2150,9 @@ def __init__(self, agents: AsyncAgentsResource) -> None:
self.import_file = async_to_raw_response_wrapper(
agents.import_file,
)
+ self.recompile = async_to_raw_response_wrapper(
+ agents.recompile,
+ )
@cached_property
def messages(self) -> AsyncMessagesResourceWithRawResponse:
@@ -2105,6 +2216,9 @@ def __init__(self, agents: AgentsResource) -> None:
self.import_file = to_streamed_response_wrapper(
agents.import_file,
)
+ self.recompile = to_streamed_response_wrapper(
+ agents.recompile,
+ )
@cached_property
def messages(self) -> MessagesResourceWithStreamingResponse:
@@ -2168,6 +2282,9 @@ def __init__(self, agents: AsyncAgentsResource) -> None:
self.import_file = async_to_streamed_response_wrapper(
agents.import_file,
)
+ self.recompile = async_to_streamed_response_wrapper(
+ agents.recompile,
+ )
@cached_property
def messages(self) -> AsyncMessagesResourceWithStreamingResponse:
diff --git a/src/letta_client/resources/agents/messages.py b/src/letta_client/resources/agents/messages.py
index 2a056eef..d3ea7f68 100644
--- a/src/letta_client/resources/agents/messages.py
+++ b/src/letta_client/resources/agents/messages.py
@@ -70,6 +70,7 @@ def create(
assistant_message_tool_kwarg: str | Omit = omit,
assistant_message_tool_name: str | Omit = omit,
background: bool | Omit = omit,
+ client_skills: Optional[Iterable[message_create_params.ClientSkill]] | Omit = omit,
client_tools: Optional[Iterable[message_create_params.ClientTool]] | Omit = omit,
enable_thinking: str | Omit = omit,
include_compaction_messages: bool | Omit = omit,
@@ -127,6 +128,10 @@ def create(
background: Whether to process the request in the background (only used when
streaming=true).
+ client_skills: Client-side skills available in the environment. These are rendered in the
+ system prompt's available skills section alongside agent-scoped skills from
+ MemFS.
+
client_tools: Client-side tools that the agent can call. When the agent calls a client-side
tool, execution pauses and returns control to the client to execute the tool and
provide the result via a ToolReturn.
@@ -195,6 +200,7 @@ def create(
assistant_message_tool_kwarg: str | Omit = omit,
assistant_message_tool_name: str | Omit = omit,
background: bool | Omit = omit,
+ client_skills: Optional[Iterable[message_create_params.ClientSkill]] | Omit = omit,
client_tools: Optional[Iterable[message_create_params.ClientTool]] | Omit = omit,
enable_thinking: str | Omit = omit,
include_compaction_messages: bool | Omit = omit,
@@ -254,6 +260,10 @@ def create(
background: Whether to process the request in the background (only used when
streaming=true).
+ client_skills: Client-side skills available in the environment. These are rendered in the
+ system prompt's available skills section alongside agent-scoped skills from
+ MemFS.
+
client_tools: Client-side tools that the agent can call. When the agent calls a client-side
tool, execution pauses and returns control to the client to execute the tool and
provide the result via a ToolReturn.
@@ -319,6 +329,7 @@ def create(
assistant_message_tool_kwarg: str | Omit = omit,
assistant_message_tool_name: str | Omit = omit,
background: bool | Omit = omit,
+ client_skills: Optional[Iterable[message_create_params.ClientSkill]] | Omit = omit,
client_tools: Optional[Iterable[message_create_params.ClientTool]] | Omit = omit,
enable_thinking: str | Omit = omit,
include_compaction_messages: bool | Omit = omit,
@@ -378,6 +389,10 @@ def create(
background: Whether to process the request in the background (only used when
streaming=true).
+ client_skills: Client-side skills available in the environment. These are rendered in the
+ system prompt's available skills section alongside agent-scoped skills from
+ MemFS.
+
client_tools: Client-side tools that the agent can call. When the agent calls a client-side
tool, execution pauses and returns control to the client to execute the tool and
provide the result via a ToolReturn.
@@ -441,6 +456,7 @@ def create(
assistant_message_tool_kwarg: str | Omit = omit,
assistant_message_tool_name: str | Omit = omit,
background: bool | Omit = omit,
+ client_skills: Optional[Iterable[message_create_params.ClientSkill]] | Omit = omit,
client_tools: Optional[Iterable[message_create_params.ClientTool]] | Omit = omit,
enable_thinking: str | Omit = omit,
include_compaction_messages: bool | Omit = omit,
@@ -472,6 +488,7 @@ def create(
"assistant_message_tool_kwarg": assistant_message_tool_kwarg,
"assistant_message_tool_name": assistant_message_tool_name,
"background": background,
+ "client_skills": client_skills,
"client_tools": client_tools,
"enable_thinking": enable_thinking,
"include_compaction_messages": include_compaction_messages,
@@ -686,6 +703,7 @@ def create_async(
assistant_message_tool_kwarg: str | Omit = omit,
assistant_message_tool_name: str | Omit = omit,
callback_url: Optional[str] | Omit = omit,
+ client_skills: Optional[Iterable[message_create_async_params.ClientSkill]] | Omit = omit,
client_tools: Optional[Iterable[message_create_async_params.ClientTool]] | Omit = omit,
enable_thinking: str | Omit = omit,
include_compaction_messages: bool | Omit = omit,
@@ -731,6 +749,10 @@ def create_async(
callback_url: Optional callback URL to POST to when the job completes
+ client_skills: Client-side skills available in the environment. These are rendered in the
+ system prompt's available skills section alongside agent-scoped skills from
+ MemFS.
+
client_tools: Client-side tools that the agent can call. When the agent calls a client-side
tool, execution pauses and returns control to the client to execute the tool and
provide the result via a ToolReturn.
@@ -788,6 +810,7 @@ def create_async(
"assistant_message_tool_kwarg": assistant_message_tool_kwarg,
"assistant_message_tool_name": assistant_message_tool_name,
"callback_url": callback_url,
+ "client_skills": client_skills,
"client_tools": client_tools,
"enable_thinking": enable_thinking,
"include_compaction_messages": include_compaction_messages,
@@ -858,6 +881,7 @@ def stream(
assistant_message_tool_kwarg: str | Omit = omit,
assistant_message_tool_name: str | Omit = omit,
background: bool | Omit = omit,
+ client_skills: Optional[Iterable[message_stream_params.ClientSkill]] | Omit = omit,
client_tools: Optional[Iterable[message_stream_params.ClientTool]] | Omit = omit,
enable_thinking: str | Omit = omit,
include_compaction_messages: bool | Omit = omit,
@@ -908,6 +932,10 @@ def stream(
background: Whether to process the request in the background (only used when
streaming=true).
+ client_skills: Client-side skills available in the environment. These are rendered in the
+ system prompt's available skills section alongside agent-scoped skills from
+ MemFS.
+
client_tools: Client-side tools that the agent can call. When the agent calls a client-side
tool, execution pauses and returns control to the client to execute the tool and
provide the result via a ToolReturn.
@@ -974,6 +1002,7 @@ def stream(
"assistant_message_tool_kwarg": assistant_message_tool_kwarg,
"assistant_message_tool_name": assistant_message_tool_name,
"background": background,
+ "client_skills": client_skills,
"client_tools": client_tools,
"enable_thinking": enable_thinking,
"include_compaction_messages": include_compaction_messages,
@@ -1031,6 +1060,7 @@ async def create(
assistant_message_tool_kwarg: str | Omit = omit,
assistant_message_tool_name: str | Omit = omit,
background: bool | Omit = omit,
+ client_skills: Optional[Iterable[message_create_params.ClientSkill]] | Omit = omit,
client_tools: Optional[Iterable[message_create_params.ClientTool]] | Omit = omit,
enable_thinking: str | Omit = omit,
include_compaction_messages: bool | Omit = omit,
@@ -1088,6 +1118,10 @@ async def create(
background: Whether to process the request in the background (only used when
streaming=true).
+ client_skills: Client-side skills available in the environment. These are rendered in the
+ system prompt's available skills section alongside agent-scoped skills from
+ MemFS.
+
client_tools: Client-side tools that the agent can call. When the agent calls a client-side
tool, execution pauses and returns control to the client to execute the tool and
provide the result via a ToolReturn.
@@ -1156,6 +1190,7 @@ async def create(
assistant_message_tool_kwarg: str | Omit = omit,
assistant_message_tool_name: str | Omit = omit,
background: bool | Omit = omit,
+ client_skills: Optional[Iterable[message_create_params.ClientSkill]] | Omit = omit,
client_tools: Optional[Iterable[message_create_params.ClientTool]] | Omit = omit,
enable_thinking: str | Omit = omit,
include_compaction_messages: bool | Omit = omit,
@@ -1215,6 +1250,10 @@ async def create(
background: Whether to process the request in the background (only used when
streaming=true).
+ client_skills: Client-side skills available in the environment. These are rendered in the
+ system prompt's available skills section alongside agent-scoped skills from
+ MemFS.
+
client_tools: Client-side tools that the agent can call. When the agent calls a client-side
tool, execution pauses and returns control to the client to execute the tool and
provide the result via a ToolReturn.
@@ -1280,6 +1319,7 @@ async def create(
assistant_message_tool_kwarg: str | Omit = omit,
assistant_message_tool_name: str | Omit = omit,
background: bool | Omit = omit,
+ client_skills: Optional[Iterable[message_create_params.ClientSkill]] | Omit = omit,
client_tools: Optional[Iterable[message_create_params.ClientTool]] | Omit = omit,
enable_thinking: str | Omit = omit,
include_compaction_messages: bool | Omit = omit,
@@ -1339,6 +1379,10 @@ async def create(
background: Whether to process the request in the background (only used when
streaming=true).
+ client_skills: Client-side skills available in the environment. These are rendered in the
+ system prompt's available skills section alongside agent-scoped skills from
+ MemFS.
+
client_tools: Client-side tools that the agent can call. When the agent calls a client-side
tool, execution pauses and returns control to the client to execute the tool and
provide the result via a ToolReturn.
@@ -1402,6 +1446,7 @@ async def create(
assistant_message_tool_kwarg: str | Omit = omit,
assistant_message_tool_name: str | Omit = omit,
background: bool | Omit = omit,
+ client_skills: Optional[Iterable[message_create_params.ClientSkill]] | Omit = omit,
client_tools: Optional[Iterable[message_create_params.ClientTool]] | Omit = omit,
enable_thinking: str | Omit = omit,
include_compaction_messages: bool | Omit = omit,
@@ -1433,6 +1478,7 @@ async def create(
"assistant_message_tool_kwarg": assistant_message_tool_kwarg,
"assistant_message_tool_name": assistant_message_tool_name,
"background": background,
+ "client_skills": client_skills,
"client_tools": client_tools,
"enable_thinking": enable_thinking,
"include_compaction_messages": include_compaction_messages,
@@ -1647,6 +1693,7 @@ async def create_async(
assistant_message_tool_kwarg: str | Omit = omit,
assistant_message_tool_name: str | Omit = omit,
callback_url: Optional[str] | Omit = omit,
+ client_skills: Optional[Iterable[message_create_async_params.ClientSkill]] | Omit = omit,
client_tools: Optional[Iterable[message_create_async_params.ClientTool]] | Omit = omit,
enable_thinking: str | Omit = omit,
include_compaction_messages: bool | Omit = omit,
@@ -1692,6 +1739,10 @@ async def create_async(
callback_url: Optional callback URL to POST to when the job completes
+ client_skills: Client-side skills available in the environment. These are rendered in the
+ system prompt's available skills section alongside agent-scoped skills from
+ MemFS.
+
client_tools: Client-side tools that the agent can call. When the agent calls a client-side
tool, execution pauses and returns control to the client to execute the tool and
provide the result via a ToolReturn.
@@ -1749,6 +1800,7 @@ async def create_async(
"assistant_message_tool_kwarg": assistant_message_tool_kwarg,
"assistant_message_tool_name": assistant_message_tool_name,
"callback_url": callback_url,
+ "client_skills": client_skills,
"client_tools": client_tools,
"enable_thinking": enable_thinking,
"include_compaction_messages": include_compaction_messages,
@@ -1819,6 +1871,7 @@ async def stream(
assistant_message_tool_kwarg: str | Omit = omit,
assistant_message_tool_name: str | Omit = omit,
background: bool | Omit = omit,
+ client_skills: Optional[Iterable[message_stream_params.ClientSkill]] | Omit = omit,
client_tools: Optional[Iterable[message_stream_params.ClientTool]] | Omit = omit,
enable_thinking: str | Omit = omit,
include_compaction_messages: bool | Omit = omit,
@@ -1869,6 +1922,10 @@ async def stream(
background: Whether to process the request in the background (only used when
streaming=true).
+ client_skills: Client-side skills available in the environment. These are rendered in the
+ system prompt's available skills section alongside agent-scoped skills from
+ MemFS.
+
client_tools: Client-side tools that the agent can call. When the agent calls a client-side
tool, execution pauses and returns control to the client to execute the tool and
provide the result via a ToolReturn.
@@ -1935,6 +1992,7 @@ async def stream(
"assistant_message_tool_kwarg": assistant_message_tool_kwarg,
"assistant_message_tool_name": assistant_message_tool_name,
"background": background,
+ "client_skills": client_skills,
"client_tools": client_tools,
"enable_thinking": enable_thinking,
"include_compaction_messages": include_compaction_messages,
diff --git a/src/letta_client/resources/conversations/conversations.py b/src/letta_client/resources/conversations/conversations.py
index 93a4026a..03cbff2d 100644
--- a/src/letta_client/resources/conversations/conversations.py
+++ b/src/letta_client/resources/conversations/conversations.py
@@ -12,6 +12,7 @@
conversation_cancel_params,
conversation_create_params,
conversation_update_params,
+ conversation_recompile_params,
)
from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
@@ -369,6 +370,67 @@ def cancel(
cast_to=ConversationCancelResponse,
)
+ def recompile(
+ self,
+ conversation_id: str,
+ *,
+ dry_run: bool | Omit = omit,
+ agent_id: Optional[str] | Omit = omit,
+ compaction_settings: Optional[conversation_recompile_params.CompactionSettings] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> str:
+ """
+ Manually trigger system prompt recompilation for a conversation.
+
+ Args:
+ conversation_id: The conversation identifier. Can be a conversation ID ('conv-'),
+ 'default' for agent-direct mode (with agent_id parameter), or an agent ID
+ ('agent-') for backwards compatibility (deprecated).
+
+ dry_run: If True, do not persist changes; still returns the compiled system prompt.
+
+ agent_id: Agent ID for agent-direct mode with 'default' conversation. Use with
+ conversation_id='default' in the URL path.
+
+ compaction_settings: Configuration for conversation compaction / summarization.
+
+ Per-model settings (temperature, max tokens, etc.) are derived from the default
+ configuration for that handle.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ return self._post(
+ f"/v1/conversations/{conversation_id}/recompile",
+ body=maybe_transform(
+ {
+ "agent_id": agent_id,
+ "compaction_settings": compaction_settings,
+ },
+ conversation_recompile_params.ConversationRecompileParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"dry_run": dry_run}, conversation_recompile_params.ConversationRecompileParams),
+ ),
+ cast_to=str,
+ )
+
class AsyncConversationsResource(AsyncAPIResource):
@cached_property
@@ -704,6 +766,69 @@ async def cancel(
cast_to=ConversationCancelResponse,
)
+ async def recompile(
+ self,
+ conversation_id: str,
+ *,
+ dry_run: bool | Omit = omit,
+ agent_id: Optional[str] | Omit = omit,
+ compaction_settings: Optional[conversation_recompile_params.CompactionSettings] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> str:
+ """
+ Manually trigger system prompt recompilation for a conversation.
+
+ Args:
+ conversation_id: The conversation identifier. Can be a conversation ID ('conv-'),
+ 'default' for agent-direct mode (with agent_id parameter), or an agent ID
+ ('agent-') for backwards compatibility (deprecated).
+
+ dry_run: If True, do not persist changes; still returns the compiled system prompt.
+
+ agent_id: Agent ID for agent-direct mode with 'default' conversation. Use with
+ conversation_id='default' in the URL path.
+
+ compaction_settings: Configuration for conversation compaction / summarization.
+
+ Per-model settings (temperature, max tokens, etc.) are derived from the default
+ configuration for that handle.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ return await self._post(
+ f"/v1/conversations/{conversation_id}/recompile",
+ body=await async_maybe_transform(
+ {
+ "agent_id": agent_id,
+ "compaction_settings": compaction_settings,
+ },
+ conversation_recompile_params.ConversationRecompileParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {"dry_run": dry_run}, conversation_recompile_params.ConversationRecompileParams
+ ),
+ ),
+ cast_to=str,
+ )
+
class ConversationsResourceWithRawResponse:
def __init__(self, conversations: ConversationsResource) -> None:
@@ -727,6 +852,9 @@ def __init__(self, conversations: ConversationsResource) -> None:
self.cancel = to_raw_response_wrapper(
conversations.cancel,
)
+ self.recompile = to_raw_response_wrapper(
+ conversations.recompile,
+ )
@cached_property
def messages(self) -> MessagesResourceWithRawResponse:
@@ -755,6 +883,9 @@ def __init__(self, conversations: AsyncConversationsResource) -> None:
self.cancel = async_to_raw_response_wrapper(
conversations.cancel,
)
+ self.recompile = async_to_raw_response_wrapper(
+ conversations.recompile,
+ )
@cached_property
def messages(self) -> AsyncMessagesResourceWithRawResponse:
@@ -783,6 +914,9 @@ def __init__(self, conversations: ConversationsResource) -> None:
self.cancel = to_streamed_response_wrapper(
conversations.cancel,
)
+ self.recompile = to_streamed_response_wrapper(
+ conversations.recompile,
+ )
@cached_property
def messages(self) -> MessagesResourceWithStreamingResponse:
@@ -811,6 +945,9 @@ def __init__(self, conversations: AsyncConversationsResource) -> None:
self.cancel = async_to_streamed_response_wrapper(
conversations.cancel,
)
+ self.recompile = async_to_streamed_response_wrapper(
+ conversations.recompile,
+ )
@cached_property
def messages(self) -> AsyncMessagesResourceWithStreamingResponse:
diff --git a/src/letta_client/resources/conversations/messages.py b/src/letta_client/resources/conversations/messages.py
index c7113008..e3070101 100644
--- a/src/letta_client/resources/conversations/messages.py
+++ b/src/letta_client/resources/conversations/messages.py
@@ -63,6 +63,7 @@ def create(
assistant_message_tool_kwarg: str | Omit = omit,
assistant_message_tool_name: str | Omit = omit,
background: bool | Omit = omit,
+ client_skills: Optional[Iterable[message_create_params.ClientSkill]] | Omit = omit,
client_tools: Optional[Iterable[message_create_params.ClientTool]] | Omit = omit,
enable_thinking: str | Omit = omit,
include_compaction_messages: bool | Omit = omit,
@@ -115,6 +116,10 @@ def create(
background: Whether to process the request in the background (only used when
streaming=true).
+ client_skills: Client-side skills available in the environment. These are rendered in the
+ system prompt's available skills section alongside agent-scoped skills from
+ MemFS.
+
client_tools: Client-side tools that the agent can call. When the agent calls a client-side
tool, execution pauses and returns control to the client to execute the tool and
provide the result via a ToolReturn.
@@ -182,6 +187,7 @@ def create(
"assistant_message_tool_kwarg": assistant_message_tool_kwarg,
"assistant_message_tool_name": assistant_message_tool_name,
"background": background,
+ "client_skills": client_skills,
"client_tools": client_tools,
"enable_thinking": enable_thinking,
"include_compaction_messages": include_compaction_messages,
@@ -467,6 +473,7 @@ async def create(
assistant_message_tool_kwarg: str | Omit = omit,
assistant_message_tool_name: str | Omit = omit,
background: bool | Omit = omit,
+ client_skills: Optional[Iterable[message_create_params.ClientSkill]] | Omit = omit,
client_tools: Optional[Iterable[message_create_params.ClientTool]] | Omit = omit,
enable_thinking: str | Omit = omit,
include_compaction_messages: bool | Omit = omit,
@@ -519,6 +526,10 @@ async def create(
background: Whether to process the request in the background (only used when
streaming=true).
+ client_skills: Client-side skills available in the environment. These are rendered in the
+ system prompt's available skills section alongside agent-scoped skills from
+ MemFS.
+
client_tools: Client-side tools that the agent can call. When the agent calls a client-side
tool, execution pauses and returns control to the client to execute the tool and
provide the result via a ToolReturn.
@@ -586,6 +597,7 @@ async def create(
"assistant_message_tool_kwarg": assistant_message_tool_kwarg,
"assistant_message_tool_name": assistant_message_tool_name,
"background": background,
+ "client_skills": client_skills,
"client_tools": client_tools,
"enable_thinking": enable_thinking,
"include_compaction_messages": include_compaction_messages,
diff --git a/src/letta_client/types/__init__.py b/src/letta_client/types/__init__.py
index 1bd271b8..a3b5b921 100644
--- a/src/letta_client/types/__init__.py
+++ b/src/letta_client/types/__init__.py
@@ -76,6 +76,7 @@
from .openai_model_settings import OpenAIModelSettings as OpenAIModelSettings
from .passage_search_params import PassageSearchParams as PassageSearchParams
from .pip_requirement_param import PipRequirementParam as PipRequirementParam
+from .agent_recompile_params import AgentRecompileParams as AgentRecompileParams
from .bedrock_model_settings import BedrockModelSettings as BedrockModelSettings
from .embedding_config_param import EmbeddingConfigParam as EmbeddingConfigParam
from .parent_tool_rule_param import ParentToolRuleParam as ParentToolRuleParam
@@ -89,6 +90,7 @@
from .access_token_list_params import AccessTokenListParams as AccessTokenListParams
from .agent_export_file_params import AgentExportFileParams as AgentExportFileParams
from .agent_import_file_params import AgentImportFileParams as AgentImportFileParams
+from .agent_recompile_response import AgentRecompileResponse as AgentRecompileResponse
from .anthropic_model_settings import AnthropicModelSettings as AnthropicModelSettings
from .continue_tool_rule_param import ContinueToolRuleParam as ContinueToolRuleParam
from .conversation_list_params import ConversationListParams as ConversationListParams
@@ -134,6 +136,7 @@
from .google_vertex_model_settings import GoogleVertexModelSettings as GoogleVertexModelSettings
from .max_count_per_step_tool_rule import MaxCountPerStepToolRule as MaxCountPerStepToolRule
from .mcp_server_retrieve_response import McpServerRetrieveResponse as McpServerRetrieveResponse
+from .conversation_recompile_params import ConversationRecompileParams as ConversationRecompileParams
from .create_stdio_mcp_server_param import CreateStdioMcpServerParam as CreateStdioMcpServerParam
from .deepseek_model_settings_param import DeepseekModelSettingsParam as DeepseekModelSettingsParam
from .together_model_settings_param import TogetherModelSettingsParam as TogetherModelSettingsParam
@@ -141,6 +144,7 @@
from .anthropic_model_settings_param import AnthropicModelSettingsParam as AnthropicModelSettingsParam
from .google_ai_model_settings_param import GoogleAIModelSettingsParam as GoogleAIModelSettingsParam
from .required_before_exit_tool_rule import RequiredBeforeExitToolRule as RequiredBeforeExitToolRule
+from .conversation_recompile_response import ConversationRecompileResponse as ConversationRecompileResponse
from .json_object_response_format_param import JsonObjectResponseFormatParam as JsonObjectResponseFormatParam
from .json_schema_response_format_param import JsonSchemaResponseFormatParam as JsonSchemaResponseFormatParam
from .letta_message_content_union_param import LettaMessageContentUnionParam as LettaMessageContentUnionParam
diff --git a/src/letta_client/types/agent_recompile_params.py b/src/letta_client/types/agent_recompile_params.py
new file mode 100644
index 00000000..82df9e11
--- /dev/null
+++ b/src/letta_client/types/agent_recompile_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["AgentRecompileParams"]
+
+
+class AgentRecompileParams(TypedDict, total=False):
+ dry_run: bool
+ """If True, do not persist changes; still returns the compiled system prompt."""
+
+ update_timestamp: bool
+ """
+ If True, update the in-context memory last edit timestamp embedded in the system
+ prompt.
+ """
diff --git a/src/letta_client/types/agent_recompile_response.py b/src/letta_client/types/agent_recompile_response.py
new file mode 100644
index 00000000..dd48cfa8
--- /dev/null
+++ b/src/letta_client/types/agent_recompile_response.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import TypeAlias
+
+__all__ = ["AgentRecompileResponse"]
+
+AgentRecompileResponse: TypeAlias = str
diff --git a/src/letta_client/types/agents/message_create_async_params.py b/src/letta_client/types/agents/message_create_async_params.py
index 65db6cee..37d77f3e 100644
--- a/src/letta_client/types/agents/message_create_async_params.py
+++ b/src/letta_client/types/agents/message_create_async_params.py
@@ -19,6 +19,7 @@
__all__ = [
"MessageCreateAsyncParams",
+ "ClientSkill",
"ClientTool",
"InputUnionMember1",
"InputUnionMember1SummarizedReasoningContent",
@@ -46,6 +47,13 @@ class MessageCreateAsyncParams(TypedDict, total=False):
callback_url: Optional[str]
"""Optional callback URL to POST to when the job completes"""
+ client_skills: Optional[Iterable[ClientSkill]]
+ """Client-side skills available in the environment.
+
+ These are rendered in the system prompt's available skills section alongside
+ agent-scoped skills from MemFS.
+ """
+
client_tools: Optional[Iterable[ClientTool]]
"""Client-side tools that the agent can call.
@@ -118,6 +126,24 @@ class MessageCreateAsyncParams(TypedDict, total=False):
"""
+class ClientSkill(TypedDict, total=False):
+ """Schema for a client-side skill passed in the request.
+
+ Client-side skills represent environment-provided capabilities (e.g. project-scoped
+ skills) that are not stored in the agent's MemFS but should appear in the system
+ prompt's available skills section.
+ """
+
+ description: Required[str]
+ """Description of what the skill does"""
+
+ location: Required[str]
+ """Path or location hint for the skill (e.g. skills/my-skill/SKILL.md)"""
+
+ name: Required[str]
+ """The name of the skill"""
+
+
class ClientTool(TypedDict, total=False):
"""Schema for a client-side tool passed in the request.
diff --git a/src/letta_client/types/agents/message_create_params.py b/src/letta_client/types/agents/message_create_params.py
index 6545a092..559d5510 100644
--- a/src/letta_client/types/agents/message_create_params.py
+++ b/src/letta_client/types/agents/message_create_params.py
@@ -19,6 +19,7 @@
__all__ = [
"MessageCreateParamsBase",
+ "ClientSkill",
"ClientTool",
"InputUnionMember1",
"InputUnionMember1SummarizedReasoningContent",
@@ -51,6 +52,13 @@ class MessageCreateParamsBase(TypedDict, total=False):
streaming=true).
"""
+ client_skills: Optional[Iterable[ClientSkill]]
+ """Client-side skills available in the environment.
+
+ These are rendered in the system prompt's available skills section alongside
+ agent-scoped skills from MemFS.
+ """
+
client_tools: Optional[Iterable[ClientTool]]
"""Client-side tools that the agent can call.
@@ -135,6 +143,24 @@ class MessageCreateParamsBase(TypedDict, total=False):
"""
+class ClientSkill(TypedDict, total=False):
+ """Schema for a client-side skill passed in the request.
+
+ Client-side skills represent environment-provided capabilities (e.g. project-scoped
+ skills) that are not stored in the agent's MemFS but should appear in the system
+ prompt's available skills section.
+ """
+
+ description: Required[str]
+ """Description of what the skill does"""
+
+ location: Required[str]
+ """Path or location hint for the skill (e.g. skills/my-skill/SKILL.md)"""
+
+ name: Required[str]
+ """The name of the skill"""
+
+
class ClientTool(TypedDict, total=False):
"""Schema for a client-side tool passed in the request.
diff --git a/src/letta_client/types/agents/message_stream_params.py b/src/letta_client/types/agents/message_stream_params.py
index bb23aef8..1a23c2d5 100644
--- a/src/letta_client/types/agents/message_stream_params.py
+++ b/src/letta_client/types/agents/message_stream_params.py
@@ -19,6 +19,7 @@
__all__ = [
"MessageStreamParams",
+ "ClientSkill",
"ClientTool",
"InputUnionMember1",
"InputUnionMember1SummarizedReasoningContent",
@@ -49,6 +50,13 @@ class MessageStreamParams(TypedDict, total=False):
streaming=true).
"""
+ client_skills: Optional[Iterable[ClientSkill]]
+ """Client-side skills available in the environment.
+
+ These are rendered in the system prompt's available skills section alongside
+ agent-scoped skills from MemFS.
+ """
+
client_tools: Optional[Iterable[ClientTool]]
"""Client-side tools that the agent can call.
@@ -139,6 +147,24 @@ class MessageStreamParams(TypedDict, total=False):
"""
+class ClientSkill(TypedDict, total=False):
+ """Schema for a client-side skill passed in the request.
+
+ Client-side skills represent environment-provided capabilities (e.g. project-scoped
+ skills) that are not stored in the agent's MemFS but should appear in the system
+ prompt's available skills section.
+ """
+
+ description: Required[str]
+ """Description of what the skill does"""
+
+ location: Required[str]
+ """Path or location hint for the skill (e.g. skills/my-skill/SKILL.md)"""
+
+ name: Required[str]
+ """The name of the skill"""
+
+
class ClientTool(TypedDict, total=False):
"""Schema for a client-side tool passed in the request.
diff --git a/src/letta_client/types/conversation_recompile_params.py b/src/letta_client/types/conversation_recompile_params.py
new file mode 100644
index 00000000..aa2c31c8
--- /dev/null
+++ b/src/letta_client/types/conversation_recompile_params.py
@@ -0,0 +1,196 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import Literal, TypeAlias, TypedDict
+
+from .xai_model_settings_param import XaiModelSettingsParam
+from .groq_model_settings_param import GroqModelSettingsParam
+from .azure_model_settings_param import AzureModelSettingsParam
+from .text_response_format_param import TextResponseFormatParam
+from .openai_model_settings_param import OpenAIModelSettingsParam
+from .bedrock_model_settings_param import BedrockModelSettingsParam
+from .deepseek_model_settings_param import DeepseekModelSettingsParam
+from .together_model_settings_param import TogetherModelSettingsParam
+from .anthropic_model_settings_param import AnthropicModelSettingsParam
+from .google_ai_model_settings_param import GoogleAIModelSettingsParam
+from .json_object_response_format_param import JsonObjectResponseFormatParam
+from .json_schema_response_format_param import JsonSchemaResponseFormatParam
+from .google_vertex_model_settings_param import GoogleVertexModelSettingsParam
+
+__all__ = [
+ "ConversationRecompileParams",
+ "CompactionSettings",
+ "CompactionSettingsModelSettings",
+ "CompactionSettingsModelSettingsZaiModelSettings",
+ "CompactionSettingsModelSettingsZaiModelSettingsResponseFormat",
+ "CompactionSettingsModelSettingsZaiModelSettingsThinking",
+ "CompactionSettingsModelSettingsOpenRouterModelSettings",
+ "CompactionSettingsModelSettingsOpenRouterModelSettingsResponseFormat",
+ "CompactionSettingsModelSettingsChatGptoAuthModelSettings",
+ "CompactionSettingsModelSettingsChatGptoAuthModelSettingsReasoning",
+]
+
+
+class ConversationRecompileParams(TypedDict, total=False):
+ dry_run: bool
+ """If True, do not persist changes; still returns the compiled system prompt."""
+
+ agent_id: Optional[str]
+ """Agent ID for agent-direct mode with 'default' conversation.
+
+ Use with conversation_id='default' in the URL path.
+ """
+
+ compaction_settings: Optional[CompactionSettings]
+ """Configuration for conversation compaction / summarization.
+
+ Per-model settings (temperature, max tokens, etc.) are derived from the default
+ configuration for that handle.
+ """
+
+
+CompactionSettingsModelSettingsZaiModelSettingsResponseFormat: TypeAlias = Union[
+ TextResponseFormatParam, JsonSchemaResponseFormatParam, JsonObjectResponseFormatParam
+]
+
+
+class CompactionSettingsModelSettingsZaiModelSettingsThinking(TypedDict, total=False):
+ """The thinking configuration for GLM-4.5+ models."""
+
+ clear_thinking: bool
+ """If False, preserved thinking is used (recommended for agents)."""
+
+ type: Literal["enabled", "disabled"]
+ """Whether thinking is enabled or disabled."""
+
+
+class CompactionSettingsModelSettingsZaiModelSettings(TypedDict, total=False):
+ """Z.ai (ZhipuAI) model configuration (OpenAI-compatible)."""
+
+ max_output_tokens: int
+ """The maximum number of tokens the model can generate."""
+
+ parallel_tool_calls: bool
+ """Whether to enable parallel tool calling."""
+
+ provider_type: Literal["zai"]
+ """The type of the provider."""
+
+ response_format: Optional[CompactionSettingsModelSettingsZaiModelSettingsResponseFormat]
+ """The response format for the model."""
+
+ temperature: float
+ """The temperature of the model."""
+
+ thinking: CompactionSettingsModelSettingsZaiModelSettingsThinking
+ """The thinking configuration for GLM-4.5+ models."""
+
+
+CompactionSettingsModelSettingsOpenRouterModelSettingsResponseFormat: TypeAlias = Union[
+ TextResponseFormatParam, JsonSchemaResponseFormatParam, JsonObjectResponseFormatParam
+]
+
+
+class CompactionSettingsModelSettingsOpenRouterModelSettings(TypedDict, total=False):
+ """OpenRouter model configuration (OpenAI-compatible)."""
+
+ max_output_tokens: int
+ """The maximum number of tokens the model can generate."""
+
+ parallel_tool_calls: bool
+ """Whether to enable parallel tool calling."""
+
+ provider_type: Literal["openrouter"]
+ """The type of the provider."""
+
+ response_format: Optional[CompactionSettingsModelSettingsOpenRouterModelSettingsResponseFormat]
+ """The response format for the model."""
+
+ temperature: float
+ """The temperature of the model."""
+
+
+class CompactionSettingsModelSettingsChatGptoAuthModelSettingsReasoning(TypedDict, total=False):
+ """The reasoning configuration for the model."""
+
+ reasoning_effort: Literal["none", "low", "medium", "high", "xhigh"]
+ """The reasoning effort level for GPT-5.x and o-series models."""
+
+
+class CompactionSettingsModelSettingsChatGptoAuthModelSettings(TypedDict, total=False):
+ """ChatGPT OAuth model configuration (uses ChatGPT backend API)."""
+
+ max_output_tokens: int
+ """The maximum number of tokens the model can generate."""
+
+ parallel_tool_calls: bool
+ """Whether to enable parallel tool calling."""
+
+ provider_type: Literal["chatgpt_oauth"]
+ """The type of the provider."""
+
+ reasoning: CompactionSettingsModelSettingsChatGptoAuthModelSettingsReasoning
+ """The reasoning configuration for the model."""
+
+ temperature: float
+ """The temperature of the model."""
+
+
+CompactionSettingsModelSettings: TypeAlias = Union[
+ OpenAIModelSettingsParam,
+ AnthropicModelSettingsParam,
+ GoogleAIModelSettingsParam,
+ GoogleVertexModelSettingsParam,
+ AzureModelSettingsParam,
+ XaiModelSettingsParam,
+ CompactionSettingsModelSettingsZaiModelSettings,
+ GroqModelSettingsParam,
+ DeepseekModelSettingsParam,
+ TogetherModelSettingsParam,
+ BedrockModelSettingsParam,
+ CompactionSettingsModelSettingsOpenRouterModelSettings,
+ CompactionSettingsModelSettingsChatGptoAuthModelSettings,
+]
+
+
+class CompactionSettings(TypedDict, total=False):
+ """Configuration for conversation compaction / summarization.
+
+ Per-model settings (temperature,
+ max tokens, etc.) are derived from the default configuration for that handle.
+ """
+
+ clip_chars: Optional[int]
+ """The maximum length of the summary in characters.
+
+ If none, no clipping is performed.
+ """
+
+ mode: Literal["all", "sliding_window", "self_compact_all", "self_compact_sliding_window"]
+ """The type of summarization technique to use."""
+
+ model: Optional[str]
+ """
+ Model handle to use for sliding_window/all summarization (format:
+ provider/model-name). If None, uses lightweight provider-specific defaults.
+ """
+
+ model_settings: Optional[CompactionSettingsModelSettings]
+ """Optional model settings used to override defaults for the summarizer model."""
+
+ prompt: Optional[str]
+ """The prompt to use for summarization. If None, uses mode-specific default."""
+
+ prompt_acknowledgement: bool
+ """
+ Whether to include an acknowledgement post-prompt (helps prevent non-summary
+ outputs).
+ """
+
+ sliding_window_percentage: float
+ """
+ The percentage of the context window to keep post-summarization (only used in
+ sliding window modes).
+ """
diff --git a/src/letta_client/types/conversation_recompile_response.py b/src/letta_client/types/conversation_recompile_response.py
new file mode 100644
index 00000000..b9faf5a1
--- /dev/null
+++ b/src/letta_client/types/conversation_recompile_response.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import TypeAlias
+
+__all__ = ["ConversationRecompileResponse"]
+
+ConversationRecompileResponse: TypeAlias = str
diff --git a/src/letta_client/types/conversations/message_create_params.py b/src/letta_client/types/conversations/message_create_params.py
index c53dc746..65530575 100644
--- a/src/letta_client/types/conversations/message_create_params.py
+++ b/src/letta_client/types/conversations/message_create_params.py
@@ -19,6 +19,7 @@
__all__ = [
"MessageCreateParams",
+ "ClientSkill",
"ClientTool",
"InputUnionMember1",
"InputUnionMember1SummarizedReasoningContent",
@@ -55,6 +56,13 @@ class MessageCreateParams(TypedDict, total=False):
streaming=true).
"""
+ client_skills: Optional[Iterable[ClientSkill]]
+ """Client-side skills available in the environment.
+
+ These are rendered in the system prompt's available skills section alongside
+ agent-scoped skills from MemFS.
+ """
+
client_tools: Optional[Iterable[ClientTool]]
"""Client-side tools that the agent can call.
@@ -145,6 +153,24 @@ class MessageCreateParams(TypedDict, total=False):
"""
+class ClientSkill(TypedDict, total=False):
+ """Schema for a client-side skill passed in the request.
+
+ Client-side skills represent environment-provided capabilities (e.g. project-scoped
+ skills) that are not stored in the agent's MemFS but should appear in the system
+ prompt's available skills section.
+ """
+
+ description: Required[str]
+ """Description of what the skill does"""
+
+ location: Required[str]
+ """Path or location hint for the skill (e.g. skills/my-skill/SKILL.md)"""
+
+ name: Required[str]
+ """The name of the skill"""
+
+
class ClientTool(TypedDict, total=False):
"""Schema for a client-side tool passed in the request.
diff --git a/src/letta_client/types/llm_config.py b/src/letta_client/types/llm_config.py
index 336e95e6..1c88ce6a 100644
--- a/src/letta_client/types/llm_config.py
+++ b/src/letta_client/types/llm_config.py
@@ -59,6 +59,8 @@ class LlmConfig(BaseModel):
"deepseek",
"xai",
"zai",
+ "baseten",
+ "fireworks",
"openrouter",
"chatgpt_oauth",
] = FieldInfo(alias="model_endpoint_type")
diff --git a/src/letta_client/types/llm_config_param.py b/src/letta_client/types/llm_config_param.py
index 88c54289..5306c40e 100644
--- a/src/letta_client/types/llm_config_param.py
+++ b/src/letta_client/types/llm_config_param.py
@@ -55,6 +55,8 @@ class LlmConfigParam(TypedDict, total=False):
"deepseek",
"xai",
"zai",
+ "baseten",
+ "fireworks",
"openrouter",
"chatgpt_oauth",
]
diff --git a/src/letta_client/types/provider_type.py b/src/letta_client/types/provider_type.py
index f5f77c89..2e0e170c 100644
--- a/src/letta_client/types/provider_type.py
+++ b/src/letta_client/types/provider_type.py
@@ -7,10 +7,12 @@
ProviderType: TypeAlias = Literal[
"anthropic",
"azure",
+ "baseten",
"bedrock",
"cerebras",
"chatgpt_oauth",
"deepseek",
+ "fireworks",
"google_ai",
"google_vertex",
"groq",
diff --git a/tests/api_resources/agents/test_messages.py b/tests/api_resources/agents/test_messages.py
index 36643f9e..7156ff54 100644
--- a/tests/api_resources/agents/test_messages.py
+++ b/tests/api_resources/agents/test_messages.py
@@ -43,6 +43,13 @@ def test_method_create_with_all_params_overload_1(self, client: Letta) -> None:
assistant_message_tool_kwarg="assistant_message_tool_kwarg",
assistant_message_tool_name="assistant_message_tool_name",
background=True,
+ client_skills=[
+ {
+ "description": "description",
+ "location": "location",
+ "name": "name",
+ }
+ ],
client_tools=[
{
"name": "name",
@@ -136,6 +143,13 @@ def test_method_create_with_all_params_overload_2(self, client: Letta) -> None:
assistant_message_tool_kwarg="assistant_message_tool_kwarg",
assistant_message_tool_name="assistant_message_tool_name",
background=True,
+ client_skills=[
+ {
+ "description": "description",
+ "location": "location",
+ "name": "name",
+ }
+ ],
client_tools=[
{
"name": "name",
@@ -407,6 +421,13 @@ def test_method_create_async_with_all_params(self, client: Letta) -> None:
assistant_message_tool_kwarg="assistant_message_tool_kwarg",
assistant_message_tool_name="assistant_message_tool_name",
callback_url="callback_url",
+ client_skills=[
+ {
+ "description": "description",
+ "location": "location",
+ "name": "name",
+ }
+ ],
client_tools=[
{
"name": "name",
@@ -549,6 +570,13 @@ def test_method_stream_with_all_params(self, client: Letta) -> None:
assistant_message_tool_kwarg="assistant_message_tool_kwarg",
assistant_message_tool_name="assistant_message_tool_name",
background=True,
+ client_skills=[
+ {
+ "description": "description",
+ "location": "location",
+ "name": "name",
+ }
+ ],
client_tools=[
{
"name": "name",
@@ -649,6 +677,13 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
assistant_message_tool_kwarg="assistant_message_tool_kwarg",
assistant_message_tool_name="assistant_message_tool_name",
background=True,
+ client_skills=[
+ {
+ "description": "description",
+ "location": "location",
+ "name": "name",
+ }
+ ],
client_tools=[
{
"name": "name",
@@ -742,6 +777,13 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
assistant_message_tool_kwarg="assistant_message_tool_kwarg",
assistant_message_tool_name="assistant_message_tool_name",
background=True,
+ client_skills=[
+ {
+ "description": "description",
+ "location": "location",
+ "name": "name",
+ }
+ ],
client_tools=[
{
"name": "name",
@@ -1013,6 +1055,13 @@ async def test_method_create_async_with_all_params(self, async_client: AsyncLett
assistant_message_tool_kwarg="assistant_message_tool_kwarg",
assistant_message_tool_name="assistant_message_tool_name",
callback_url="callback_url",
+ client_skills=[
+ {
+ "description": "description",
+ "location": "location",
+ "name": "name",
+ }
+ ],
client_tools=[
{
"name": "name",
@@ -1155,6 +1204,13 @@ async def test_method_stream_with_all_params(self, async_client: AsyncLetta) ->
assistant_message_tool_kwarg="assistant_message_tool_kwarg",
assistant_message_tool_name="assistant_message_tool_name",
background=True,
+ client_skills=[
+ {
+ "description": "description",
+ "location": "location",
+ "name": "name",
+ }
+ ],
client_tools=[
{
"name": "name",
diff --git a/tests/api_resources/conversations/test_messages.py b/tests/api_resources/conversations/test_messages.py
index c6e6bf54..0c212148 100644
--- a/tests/api_resources/conversations/test_messages.py
+++ b/tests/api_resources/conversations/test_messages.py
@@ -38,6 +38,13 @@ def test_method_create_with_all_params(self, client: Letta) -> None:
assistant_message_tool_kwarg="assistant_message_tool_kwarg",
assistant_message_tool_name="assistant_message_tool_name",
background=True,
+ client_skills=[
+ {
+ "description": "description",
+ "location": "location",
+ "name": "name",
+ }
+ ],
client_tools=[
{
"name": "name",
@@ -315,6 +322,13 @@ async def test_method_create_with_all_params(self, async_client: AsyncLetta) ->
assistant_message_tool_kwarg="assistant_message_tool_kwarg",
assistant_message_tool_name="assistant_message_tool_name",
background=True,
+ client_skills=[
+ {
+ "description": "description",
+ "location": "location",
+ "name": "name",
+ }
+ ],
client_tools=[
{
"name": "name",
diff --git a/tests/api_resources/folders/test_files.py b/tests/api_resources/folders/test_files.py
index e73e14aa..5b3f5374 100644
--- a/tests/api_resources/folders/test_files.py
+++ b/tests/api_resources/folders/test_files.py
@@ -197,7 +197,7 @@ def test_path_params_delete(self, client: Letta) -> None:
def test_method_upload(self, client: Letta) -> None:
file = client.folders.files.upload(
folder_id="source-123e4567-e89b-42d3-8456-426614174000",
- file=b"raw file contents",
+ file=b"Example data",
)
assert_matches_type(FileUploadResponse, file, path=["response"])
@@ -206,7 +206,7 @@ def test_method_upload(self, client: Letta) -> None:
def test_method_upload_with_all_params(self, client: Letta) -> None:
file = client.folders.files.upload(
folder_id="source-123e4567-e89b-42d3-8456-426614174000",
- file=b"raw file contents",
+ file=b"Example data",
duplicate_handling="skip",
name="name",
)
@@ -217,7 +217,7 @@ def test_method_upload_with_all_params(self, client: Letta) -> None:
def test_raw_response_upload(self, client: Letta) -> None:
response = client.folders.files.with_raw_response.upload(
folder_id="source-123e4567-e89b-42d3-8456-426614174000",
- file=b"raw file contents",
+ file=b"Example data",
)
assert response.is_closed is True
@@ -230,7 +230,7 @@ def test_raw_response_upload(self, client: Letta) -> None:
def test_streaming_response_upload(self, client: Letta) -> None:
with client.folders.files.with_streaming_response.upload(
folder_id="source-123e4567-e89b-42d3-8456-426614174000",
- file=b"raw file contents",
+ file=b"Example data",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -246,7 +246,7 @@ def test_path_params_upload(self, client: Letta) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `folder_id` but received ''"):
client.folders.files.with_raw_response.upload(
folder_id="",
- file=b"raw file contents",
+ file=b"Example data",
)
@@ -430,7 +430,7 @@ async def test_path_params_delete(self, async_client: AsyncLetta) -> None:
async def test_method_upload(self, async_client: AsyncLetta) -> None:
file = await async_client.folders.files.upload(
folder_id="source-123e4567-e89b-42d3-8456-426614174000",
- file=b"raw file contents",
+ file=b"Example data",
)
assert_matches_type(FileUploadResponse, file, path=["response"])
@@ -439,7 +439,7 @@ async def test_method_upload(self, async_client: AsyncLetta) -> None:
async def test_method_upload_with_all_params(self, async_client: AsyncLetta) -> None:
file = await async_client.folders.files.upload(
folder_id="source-123e4567-e89b-42d3-8456-426614174000",
- file=b"raw file contents",
+ file=b"Example data",
duplicate_handling="skip",
name="name",
)
@@ -450,7 +450,7 @@ async def test_method_upload_with_all_params(self, async_client: AsyncLetta) ->
async def test_raw_response_upload(self, async_client: AsyncLetta) -> None:
response = await async_client.folders.files.with_raw_response.upload(
folder_id="source-123e4567-e89b-42d3-8456-426614174000",
- file=b"raw file contents",
+ file=b"Example data",
)
assert response.is_closed is True
@@ -463,7 +463,7 @@ async def test_raw_response_upload(self, async_client: AsyncLetta) -> None:
async def test_streaming_response_upload(self, async_client: AsyncLetta) -> None:
async with async_client.folders.files.with_streaming_response.upload(
folder_id="source-123e4567-e89b-42d3-8456-426614174000",
- file=b"raw file contents",
+ file=b"Example data",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -479,5 +479,5 @@ async def test_path_params_upload(self, async_client: AsyncLetta) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `folder_id` but received ''"):
await async_client.folders.files.with_raw_response.upload(
folder_id="",
- file=b"raw file contents",
+ file=b"Example data",
)
diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py
index 296c7320..ecd1a590 100644
--- a/tests/api_resources/test_agents.py
+++ b/tests/api_resources/test_agents.py
@@ -584,7 +584,7 @@ def test_path_params_export_file(self, client: Letta) -> None:
@parametrize
def test_method_import_file(self, client: Letta) -> None:
agent = client.agents.import_file(
- file=b"raw file contents",
+ file=b"Example data",
)
assert_matches_type(AgentImportFileResponse, agent, path=["response"])
@@ -592,7 +592,7 @@ def test_method_import_file(self, client: Letta) -> None:
@parametrize
def test_method_import_file_with_all_params(self, client: Letta) -> None:
agent = client.agents.import_file(
- file=b"raw file contents",
+ file=b"Example data",
append_copy_suffix=True,
embedding="embedding",
env_vars_json="env_vars_json",
@@ -613,7 +613,7 @@ def test_method_import_file_with_all_params(self, client: Letta) -> None:
@parametrize
def test_raw_response_import_file(self, client: Letta) -> None:
response = client.agents.with_raw_response.import_file(
- file=b"raw file contents",
+ file=b"Example data",
)
assert response.is_closed is True
@@ -625,7 +625,7 @@ def test_raw_response_import_file(self, client: Letta) -> None:
@parametrize
def test_streaming_response_import_file(self, client: Letta) -> None:
with client.agents.with_streaming_response.import_file(
- file=b"raw file contents",
+ file=b"Example data",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -635,6 +635,58 @@ def test_streaming_response_import_file(self, client: Letta) -> None:
assert cast(Any, response.is_closed) is True
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_recompile(self, client: Letta) -> None:
+ agent = client.agents.recompile(
+ agent_id="agent-123e4567-e89b-42d3-8456-426614174000",
+ )
+ assert_matches_type(str, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_recompile_with_all_params(self, client: Letta) -> None:
+ agent = client.agents.recompile(
+ agent_id="agent-123e4567-e89b-42d3-8456-426614174000",
+ dry_run=True,
+ update_timestamp=True,
+ )
+ assert_matches_type(str, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_recompile(self, client: Letta) -> None:
+ response = client.agents.with_raw_response.recompile(
+ agent_id="agent-123e4567-e89b-42d3-8456-426614174000",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = response.parse()
+ assert_matches_type(str, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_recompile(self, client: Letta) -> None:
+ with client.agents.with_streaming_response.recompile(
+ agent_id="agent-123e4567-e89b-42d3-8456-426614174000",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = response.parse()
+ assert_matches_type(str, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_recompile(self, client: Letta) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
+ client.agents.with_raw_response.recompile(
+ agent_id="",
+ )
+
class TestAsyncAgents:
parametrize = pytest.mark.parametrize(
@@ -1203,7 +1255,7 @@ async def test_path_params_export_file(self, async_client: AsyncLetta) -> None:
@parametrize
async def test_method_import_file(self, async_client: AsyncLetta) -> None:
agent = await async_client.agents.import_file(
- file=b"raw file contents",
+ file=b"Example data",
)
assert_matches_type(AgentImportFileResponse, agent, path=["response"])
@@ -1211,7 +1263,7 @@ async def test_method_import_file(self, async_client: AsyncLetta) -> None:
@parametrize
async def test_method_import_file_with_all_params(self, async_client: AsyncLetta) -> None:
agent = await async_client.agents.import_file(
- file=b"raw file contents",
+ file=b"Example data",
append_copy_suffix=True,
embedding="embedding",
env_vars_json="env_vars_json",
@@ -1232,7 +1284,7 @@ async def test_method_import_file_with_all_params(self, async_client: AsyncLetta
@parametrize
async def test_raw_response_import_file(self, async_client: AsyncLetta) -> None:
response = await async_client.agents.with_raw_response.import_file(
- file=b"raw file contents",
+ file=b"Example data",
)
assert response.is_closed is True
@@ -1244,7 +1296,7 @@ async def test_raw_response_import_file(self, async_client: AsyncLetta) -> None:
@parametrize
async def test_streaming_response_import_file(self, async_client: AsyncLetta) -> None:
async with async_client.agents.with_streaming_response.import_file(
- file=b"raw file contents",
+ file=b"Example data",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -1253,3 +1305,55 @@ async def test_streaming_response_import_file(self, async_client: AsyncLetta) ->
assert_matches_type(AgentImportFileResponse, agent, path=["response"])
assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_recompile(self, async_client: AsyncLetta) -> None:
+ agent = await async_client.agents.recompile(
+ agent_id="agent-123e4567-e89b-42d3-8456-426614174000",
+ )
+ assert_matches_type(str, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_recompile_with_all_params(self, async_client: AsyncLetta) -> None:
+ agent = await async_client.agents.recompile(
+ agent_id="agent-123e4567-e89b-42d3-8456-426614174000",
+ dry_run=True,
+ update_timestamp=True,
+ )
+ assert_matches_type(str, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_recompile(self, async_client: AsyncLetta) -> None:
+ response = await async_client.agents.with_raw_response.recompile(
+ agent_id="agent-123e4567-e89b-42d3-8456-426614174000",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = await response.parse()
+ assert_matches_type(str, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_recompile(self, async_client: AsyncLetta) -> None:
+ async with async_client.agents.with_streaming_response.recompile(
+ agent_id="agent-123e4567-e89b-42d3-8456-426614174000",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = await response.parse()
+ assert_matches_type(str, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_recompile(self, async_client: AsyncLetta) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"):
+ await async_client.agents.with_raw_response.recompile(
+ agent_id="",
+ )
diff --git a/tests/api_resources/test_conversations.py b/tests/api_resources/test_conversations.py
index ae015451..1695be0c 100644
--- a/tests/api_resources/test_conversations.py
+++ b/tests/api_resources/test_conversations.py
@@ -312,6 +312,75 @@ def test_path_params_cancel(self, client: Letta) -> None:
conversation_id="",
)
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_recompile(self, client: Letta) -> None:
+ conversation = client.conversations.recompile(
+ conversation_id="default",
+ )
+ assert_matches_type(str, conversation, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_recompile_with_all_params(self, client: Letta) -> None:
+ conversation = client.conversations.recompile(
+ conversation_id="default",
+ dry_run=True,
+ agent_id="agent_id",
+ compaction_settings={
+ "clip_chars": 0,
+ "mode": "all",
+ "model": "model",
+ "model_settings": {
+ "max_output_tokens": 0,
+ "parallel_tool_calls": True,
+ "provider_type": "openai",
+ "reasoning": {"reasoning_effort": "none"},
+ "response_format": {"type": "text"},
+ "strict": True,
+ "temperature": 0,
+ },
+ "prompt": "prompt",
+ "prompt_acknowledgement": True,
+ "sliding_window_percentage": 0,
+ },
+ )
+ assert_matches_type(str, conversation, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_recompile(self, client: Letta) -> None:
+ response = client.conversations.with_raw_response.recompile(
+ conversation_id="default",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ conversation = response.parse()
+ assert_matches_type(str, conversation, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_recompile(self, client: Letta) -> None:
+ with client.conversations.with_streaming_response.recompile(
+ conversation_id="default",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ conversation = response.parse()
+ assert_matches_type(str, conversation, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_recompile(self, client: Letta) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+ client.conversations.with_raw_response.recompile(
+ conversation_id="",
+ )
+
class TestAsyncConversations:
parametrize = pytest.mark.parametrize(
@@ -608,3 +677,72 @@ async def test_path_params_cancel(self, async_client: AsyncLetta) -> None:
await async_client.conversations.with_raw_response.cancel(
conversation_id="",
)
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_recompile(self, async_client: AsyncLetta) -> None:
+ conversation = await async_client.conversations.recompile(
+ conversation_id="default",
+ )
+ assert_matches_type(str, conversation, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_recompile_with_all_params(self, async_client: AsyncLetta) -> None:
+ conversation = await async_client.conversations.recompile(
+ conversation_id="default",
+ dry_run=True,
+ agent_id="agent_id",
+ compaction_settings={
+ "clip_chars": 0,
+ "mode": "all",
+ "model": "model",
+ "model_settings": {
+ "max_output_tokens": 0,
+ "parallel_tool_calls": True,
+ "provider_type": "openai",
+ "reasoning": {"reasoning_effort": "none"},
+ "response_format": {"type": "text"},
+ "strict": True,
+ "temperature": 0,
+ },
+ "prompt": "prompt",
+ "prompt_acknowledgement": True,
+ "sliding_window_percentage": 0,
+ },
+ )
+ assert_matches_type(str, conversation, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_recompile(self, async_client: AsyncLetta) -> None:
+ response = await async_client.conversations.with_raw_response.recompile(
+ conversation_id="default",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ conversation = await response.parse()
+ assert_matches_type(str, conversation, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_recompile(self, async_client: AsyncLetta) -> None:
+ async with async_client.conversations.with_streaming_response.recompile(
+ conversation_id="default",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ conversation = await response.parse()
+ assert_matches_type(str, conversation, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_recompile(self, async_client: AsyncLetta) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+ await async_client.conversations.with_raw_response.recompile(
+ conversation_id="",
+ )