From 3068d4a27b199b9bee2121f1daa4666f231ea91e Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Fri, 9 Jan 2026 13:34:15 +0000 Subject: [PATCH] ## SDK Changes Detected: * `mistral_gcp.chat.complete()`: * `request` **Changed** **Breaking** :warning: * `response` **Changed** * `mistral_gcp.fim.complete()`: `response` **Changed** --- .speakeasy/workflow.lock | 11 +- packages/mistralai_gcp/.gitignore | 3 + packages/mistralai_gcp/.speakeasy/gen.lock | 64 +- packages/mistralai_gcp/.speakeasy/gen.yaml | 15 +- packages/mistralai_gcp/RELEASES.md | 11 + .../docs/models/chatcompletionrequest.md | 37 +- .../models/chatcompletionrequesttoolchoice.md | 2 + .../docs/models/chatcompletionresponse.md | 4 +- .../models/chatcompletionstreamrequest.md | 37 +- .../chatcompletionstreamrequesttoolchoice.md | 2 + .../docs/models/completionchunk.md | 6 +- .../docs/models/fimcompletionrequest.md | 4 +- .../docs/models/fimcompletionresponse.md | 4 +- .../docs/models/fimcompletionstreamrequest.md | 4 +- .../mistralai_gcp/docs/models/function.md | 4 +- .../mistralai_gcp/docs/models/jsonschema.md | 2 +- .../docs/models/mistralpromptmode.md | 8 + .../mistralai_gcp/docs/models/prediction.md | 2 + .../docs/models/responseformat.md | 10 +- .../docs/models/responseformats.md | 2 - .../docs/models/systemmessagecontent.md | 4 +- .../docs/models/systemmessagecontentchunks.md | 17 + .../mistralai_gcp/docs/models/thinkchunk.md | 10 + .../docs/models/thinkchunktype.md | 8 + .../mistralai_gcp/docs/models/thinking.md | 17 + packages/mistralai_gcp/docs/models/tool.md | 4 +- .../mistralai_gcp/docs/models/toolcall.md | 2 +- .../mistralai_gcp/docs/models/toolchoice.md | 4 +- .../mistralai_gcp/docs/models/usageinfo.md | 12 +- packages/mistralai_gcp/poetry.lock | 955 ++++++++++++++++++ packages/mistralai_gcp/poetry.toml | 3 + packages/mistralai_gcp/pylintrc | 11 +- packages/mistralai_gcp/pyproject.toml | 56 +- .../mistralai_gcp/scripts/prepare_readme.py | 38 + packages/mistralai_gcp/scripts/publish.sh | 6 +- .../src/mistralai_gcp/_hooks/types.py | 7 + .../src/mistralai_gcp/_version.py | 8 +- .../src/mistralai_gcp/basesdk.py | 52 +- .../mistralai_gcp/src/mistralai_gcp/chat.py | 173 ++-- .../mistralai_gcp/src/mistralai_gcp/fim.py | 113 +-- .../src/mistralai_gcp/httpclient.py | 22 +- .../src/mistralai_gcp/models/__init__.py | 437 +++++--- .../mistralai_gcp/models/assistantmessage.py | 2 +- .../models/chatcompletionrequest.py | 43 +- .../models/chatcompletionresponse.py | 12 +- .../models/chatcompletionstreamrequest.py | 43 +- .../models/completionresponsestreamchoice.py | 2 +- .../src/mistralai_gcp/models/deltamessage.py | 2 +- .../models/fimcompletionrequest.py | 12 +- .../models/fimcompletionresponse.py | 12 +- .../models/fimcompletionstreamrequest.py | 12 +- .../models/httpvalidationerror.py | 17 +- .../src/mistralai_gcp/models/imageurl.py | 2 +- .../src/mistralai_gcp/models/jsonschema.py | 2 +- .../mistralai_gcp/models/mistralgcperror.py | 26 + .../mistralai_gcp/models/mistralpromptmode.py | 8 + .../mistralai_gcp/models/no_response_error.py | 13 + .../src/mistralai_gcp/models/prediction.py | 4 + .../mistralai_gcp/models/responseformat.py | 8 +- .../mistralai_gcp/models/responseformats.py | 1 - .../models/responsevalidationerror.py | 25 + .../src/mistralai_gcp/models/sdkerror.py | 44 +- .../src/mistralai_gcp/models/systemmessage.py | 10 +- .../models/systemmessagecontentchunks.py | 21 + .../src/mistralai_gcp/models/thinkchunk.py | 35 + .../src/mistralai_gcp/models/toolmessage.py | 2 +- .../src/mistralai_gcp/models/usageinfo.py | 79 +- 
.../src/mistralai_gcp/models/usermessage.py | 2 +- .../src/mistralai_gcp/sdkconfiguration.py | 7 - .../src/mistralai_gcp/types/basemodel.py | 6 +- .../src/mistralai_gcp/utils/__init__.py | 188 +++- .../src/mistralai_gcp/utils/datetimes.py | 23 + .../src/mistralai_gcp/utils/enums.py | 94 +- .../src/mistralai_gcp/utils/eventstreaming.py | 10 + .../src/mistralai_gcp/utils/forms.py | 77 +- .../src/mistralai_gcp/utils/serializers.py | 36 +- .../utils/unmarshal_json_response.py | 24 + 77 files changed, 2425 insertions(+), 670 deletions(-) create mode 100644 packages/mistralai_gcp/RELEASES.md create mode 100644 packages/mistralai_gcp/docs/models/mistralpromptmode.md create mode 100644 packages/mistralai_gcp/docs/models/systemmessagecontentchunks.md create mode 100644 packages/mistralai_gcp/docs/models/thinkchunk.md create mode 100644 packages/mistralai_gcp/docs/models/thinkchunktype.md create mode 100644 packages/mistralai_gcp/docs/models/thinking.md create mode 100644 packages/mistralai_gcp/poetry.lock create mode 100644 packages/mistralai_gcp/poetry.toml create mode 100644 packages/mistralai_gcp/scripts/prepare_readme.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/no_response_error.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/systemmessagecontentchunks.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/datetimes.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 48c4bf7b..982b544f 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -8,10 +8,11 @@ sources: - latest mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 - sourceBlobDigest: sha256:3327f078a11596abdcbc21cd8a1adcf0b2aa474975cd9ab1feb745a2e50d555f + sourceRevisionDigest: sha256:bc59aaf55dc46e94ddf6cc687292807629d7a17ee5f573a504e7e44fd365e147 + sourceBlobDigest: sha256:545fe85c5dae11def2741fc7a99f297b7f0728c9677c3c7b94d56ddbed70581d tags: - latest + - speakeasy-sdk-regen-guillaume.dumont-fix-github-actions-1767965603 mistral-openapi: sourceNamespace: mistral-openapi sourceRevisionDigest: sha256:cb63bd997cefe7b3b36e91a475df57cb779bf79f183340e0713d8ffb16a2dabc @@ -30,10 +31,10 @@ targets: mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 - sourceBlobDigest: sha256:3327f078a11596abdcbc21cd8a1adcf0b2aa474975cd9ab1feb745a2e50d555f + sourceRevisionDigest: sha256:bc59aaf55dc46e94ddf6cc687292807629d7a17ee5f573a504e7e44fd365e147 + sourceBlobDigest: sha256:545fe85c5dae11def2741fc7a99f297b7f0728c9677c3c7b94d56ddbed70581d codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:03b3e82c20d10faa8622f14696632b96b1a2e8d747b266fff345061298d5f3e4 + codeSamplesRevisionDigest: sha256:aa98a5f632312a6f8a91bdfb70de90ffa494f41b7a3ef6fd38d740bcaf2abc40 mistralai-sdk: source: mistral-openapi sourceNamespace: 
mistral-openapi diff --git a/packages/mistralai_gcp/.gitignore b/packages/mistralai_gcp/.gitignore index 5a82b069..f2ea8c39 100644 --- a/packages/mistralai_gcp/.gitignore +++ b/packages/mistralai_gcp/.gitignore @@ -1,3 +1,6 @@ +**/__pycache__/ +**/.speakeasy/temp/ +**/.speakeasy/logs/ .speakeasy/reports README-PYPI.md .venv/ diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index 5e157235..b723b2dd 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,41 +1,44 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: 28fe1ab59b4dee005217f2dbbd836060 - docVersion: 0.0.2 - speakeasyVersion: 1.517.3 - generationVersion: 2.548.6 - releaseVersion: 1.6.0 - configChecksum: 66bf5911f59189922e03a75a72923b32 + docChecksum: 05fc6f45406deac180ffc1df760c67f4 + docVersion: 1.0.0 + speakeasyVersion: 1.606.10 + generationVersion: 2.687.13 + releaseVersion: 2.0.0 + configChecksum: a1b03996b9a524c2110678cbe2b68226 + repoURL: https://github.com/mistralai/client-python.git + repoSubDirectory: packages/mistralai_gcp + installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/mistralai_gcp published: true features: python: additionalDependencies: 1.0.0 + additionalProperties: 1.0.1 constsAndDefaults: 1.0.5 - core: 5.12.3 + core: 5.20.1 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.1 + examples: 3.0.2 flatRequests: 1.0.1 globalSecurity: 3.0.3 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.1.0 + globalServerURLs: 3.1.1 methodArguments: 1.0.2 nameOverrides: 3.0.1 nullables: 1.0.1 - openEnums: 1.0.0 + openEnums: 1.0.1 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.1 - serverEvents: 1.0.7 + sdkHooks: 1.1.0 + serverEvents: 1.0.8 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 unions: 3.0.4 generatedFiles: - .gitattributes - - .python-version - .vscode/settings.json - docs/models/arguments.md - docs/models/assistantmessage.md @@ -73,6 +76,7 @@ generatedFiles: - docs/models/jsonschema.md - docs/models/loc.md - docs/models/messages.md + - docs/models/mistralpromptmode.md - docs/models/prediction.md - docs/models/referencechunk.md - docs/models/referencechunktype.md @@ -83,7 +87,11 @@ generatedFiles: - docs/models/stop.md - docs/models/systemmessage.md - docs/models/systemmessagecontent.md + - docs/models/systemmessagecontentchunks.md - docs/models/textchunk.md + - docs/models/thinkchunk.md + - docs/models/thinkchunktype.md + - docs/models/thinking.md - docs/models/tool.md - docs/models/toolcall.md - docs/models/toolchoice.md @@ -135,14 +143,20 @@ generatedFiles: - src/mistralai_gcp/models/imageurl.py - src/mistralai_gcp/models/imageurlchunk.py - src/mistralai_gcp/models/jsonschema.py + - src/mistralai_gcp/models/mistralgcperror.py + - src/mistralai_gcp/models/mistralpromptmode.py + - src/mistralai_gcp/models/no_response_error.py - src/mistralai_gcp/models/prediction.py - src/mistralai_gcp/models/referencechunk.py - src/mistralai_gcp/models/responseformat.py - src/mistralai_gcp/models/responseformats.py + - src/mistralai_gcp/models/responsevalidationerror.py - src/mistralai_gcp/models/sdkerror.py - src/mistralai_gcp/models/security.py - src/mistralai_gcp/models/systemmessage.py + - src/mistralai_gcp/models/systemmessagecontentchunks.py - src/mistralai_gcp/models/textchunk.py + - src/mistralai_gcp/models/thinkchunk.py - src/mistralai_gcp/models/tool.py - 
src/mistralai_gcp/models/toolcall.py - src/mistralai_gcp/models/toolchoice.py @@ -153,12 +167,12 @@ generatedFiles: - src/mistralai_gcp/models/usermessage.py - src/mistralai_gcp/models/validationerror.py - src/mistralai_gcp/py.typed - - src/mistralai_gcp/sdk.py - src/mistralai_gcp/sdkconfiguration.py - src/mistralai_gcp/types/__init__.py - src/mistralai_gcp/types/basemodel.py - src/mistralai_gcp/utils/__init__.py - src/mistralai_gcp/utils/annotations.py + - src/mistralai_gcp/utils/datetimes.py - src/mistralai_gcp/utils/enums.py - src/mistralai_gcp/utils/eventstreaming.py - src/mistralai_gcp/utils/forms.py @@ -170,42 +184,40 @@ generatedFiles: - src/mistralai_gcp/utils/retries.py - src/mistralai_gcp/utils/security.py - src/mistralai_gcp/utils/serializers.py + - src/mistralai_gcp/utils/unmarshal_json_response.py - src/mistralai_gcp/utils/url.py - src/mistralai_gcp/utils/values.py examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-small-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} responses: "422": application/json: {} - "200": {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-small-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} stream_fim: speakeasy-default-stream-fim: requestBody: - application/json: {"model": "codestral-2405", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-latest", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} responses: "422": application/json: {} - "200": {} fim_completion_v1_fim_completions_post: - speakeasy-default-fim-completion-v1-fim-completions-post: + userExample: requestBody: - application/json: {"model": "codestral-2405", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-latest", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} - "422": - application/json: {} -examplesVersion: 1.0.0 + application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). 
As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} +examplesVersion: 1.0.2 generatedTests: {} +releaseNotes: "## SDK Changes Detected:\n* `mistral_gcp.chat.complete()`: \n * `request` **Changed** **Breaking** :warning:\n * `response` **Changed**\n* `mistral_gcp.fim.complete()`: `response` **Changed**\n" diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index d7be7fed..0732ba52 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -4,6 +4,7 @@ generation: maintainOpenAPIOrder: true usageSnippets: optionalPropertyRendering: withExample + sdkInitStyle: constructor useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true @@ -11,11 +12,16 @@ generation: parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true securityFeb2025: false + sharedErrorComponentsApr2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false + tests: + generateTests: true + generateNewTests: false + skipResponseBodyAssertions: false python: - version: 1.6.0 + version: 2.0.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -23,8 +29,12 @@ python: main: google-auth: ^2.31.0 requests: ^2.32.3 + allowedRedefinedBuiltins: + - id + - object authors: - Mistral + baseErrorName: MistralGcpError clientServerStatusCodesAsErrors: true defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in GCP. @@ -46,8 +56,11 @@ python: inputModelSuffix: input maxMethodParams: 15 methodArguments: infer-optional-args + moduleName: "" outputModelSuffix: output + packageManager: poetry packageName: mistralai-gcp + pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/packages/mistralai_gcp/RELEASES.md b/packages/mistralai_gcp/RELEASES.md new file mode 100644 index 00000000..578175f4 --- /dev/null +++ b/packages/mistralai_gcp/RELEASES.md @@ -0,0 +1,11 @@ + + +## 2026-01-09 13:33:05 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0] packages/mistralai_gcp +### Releases +- [PyPI v2.0.0] https://pypi.org/project/mistralai-gcp/2.0.0 - packages/mistralai_gcp \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md index 9d735d08..48103e30 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md @@ -3,21 +3,22 @@ ## Fields -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. 
| | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md b/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md index 1646528d..dc82a8ef 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md @@ -1,5 +1,7 @@ # ChatCompletionRequestToolChoice +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
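For reviewers of the request/response changes above, a minimal, illustrative sketch of a non-streaming `chat.complete()` call that exercises the documented fields (including the new `prompt_mode` and the `response_format` default). Only the field names and example values are taken from this diff; the `MistralGoogleCloud` client and its `region`/`project_id` constructor arguments are assumptions based on this package's README, not something this change verifies.

```python
# Illustrative sketch only, not part of this diff. Field names and example
# values come from the updated ChatCompletionRequest/Response docs above;
# the MistralGoogleCloud client and its region/project_id arguments are
# assumptions based on this package's README.
from mistralai_gcp import MistralGoogleCloud

client = MistralGoogleCloud(region="europe-west4", project_id="my-gcp-project")

res = client.chat.complete(
    model="mistral-large-latest",
    messages=[
        {
            "role": "user",
            "content": "Who is the best French painter? Answer in one short sentence.",
        }
    ],
    response_format={"type": "text"},  # documented default; {"type": "json_object"} enables JSON mode
    prompt_mode="reasoning",  # new optional field; "reasoning" is the only documented value
)

# `created` and `choices` are now required on ChatCompletionResponse.
print(res.created, res.choices[0].message.content)
```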
+ ## Supported Types diff --git a/packages/mistralai_gcp/docs/models/chatcompletionresponse.md b/packages/mistralai_gcp/docs/models/chatcompletionresponse.md index ad376158..a0465ffb 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionresponse.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionresponse.md @@ -9,5 +9,5 @@ | `object` | *str* | :heavy_check_mark: | N/A | chat.completion | | `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | | `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | -| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | -| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md index 827943cd..aaacc09c 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md @@ -3,21 +3,22 @@ ## Fields -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. 
| | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md index cce0ca3e..43f3ca38 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -1,5 +1,7 @@ # ChatCompletionStreamRequestToolChoice +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
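The streaming variant documented above can be sketched the same way; the `chat.stream()` method name and the `event.data.choices[...].delta` access pattern are assumptions about the generated SDK rather than anything asserted by this diff.

```python
# Illustrative sketch only; the chat.stream() method name and the
# event/chunk access pattern are assumptions about the generated SDK.
from mistralai_gcp import MistralGoogleCloud

client = MistralGoogleCloud(region="europe-west4", project_id="my-gcp-project")

stream = client.chat.stream(
    model="mistral-large-latest",
    messages=[
        {
            "role": "user",
            "content": "Who is the best French painter? Answer in one short sentence.",
        }
    ],
    response_format={"type": "text"},
)

# Per the updated CompletionChunk docs, `model` and `choices` are required
# on each chunk, while `usage` remains optional.
for event in stream:
    delta = event.data.choices[0].delta.content
    if delta:
        print(delta, end="")
```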
+ ## Supported Types diff --git a/packages/mistralai_gcp/docs/models/completionchunk.md b/packages/mistralai_gcp/docs/models/completionchunk.md index b8ae6a09..7f8ab5e6 100644 --- a/packages/mistralai_gcp/docs/models/completionchunk.md +++ b/packages/mistralai_gcp/docs/models/completionchunk.md @@ -6,8 +6,8 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | | `id` | *str* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | -| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | | `object` | *Optional[str]* | :heavy_minus_sign: | N/A | | `created` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `model` | *str* | :heavy_check_mark: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md index 7507b90c..380f109c 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md @@ -5,13 +5,13 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. 
| | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionresponse.md b/packages/mistralai_gcp/docs/models/fimcompletionresponse.md index da786a1f..cd62d034 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionresponse.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionresponse.md @@ -9,5 +9,5 @@ | `object` | *str* | :heavy_check_mark: | N/A | chat.completion | | `model` | *str* | :heavy_check_mark: | N/A | codestral-latest | | `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | -| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | -| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md index 6cc439c7..a890ff2b 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md @@ -5,13 +5,13 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. 
| | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/function.md b/packages/mistralai_gcp/docs/models/function.md index a166b7bb..b2bdb3fe 100644 --- a/packages/mistralai_gcp/docs/models/function.md +++ b/packages/mistralai_gcp/docs/models/function.md @@ -6,6 +6,6 @@ | Field | Type | Required | Description | | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | -| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `description` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/jsonschema.md b/packages/mistralai_gcp/docs/models/jsonschema.md index ae387867..7ff7c070 100644 --- a/packages/mistralai_gcp/docs/models/jsonschema.md +++ b/packages/mistralai_gcp/docs/models/jsonschema.md @@ -6,6 +6,6 @@ | Field | Type | Required | Description | | ----------------------- | ----------------------- | ----------------------- | ----------------------- | | `name` | *str* | :heavy_check_mark: | N/A | -| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/mistralpromptmode.md b/packages/mistralai_gcp/docs/models/mistralpromptmode.md new file mode 100644 index 00000000..7416e203 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/mistralpromptmode.md @@ -0,0 +1,8 @@ +# MistralPromptMode + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `REASONING` | reasoning | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/prediction.md b/packages/mistralai_gcp/docs/models/prediction.md index 86e9c396..fae3c1ca 100644 --- a/packages/mistralai_gcp/docs/models/prediction.md +++ b/packages/mistralai_gcp/docs/models/prediction.md @@ -1,5 +1,7 @@ # Prediction +Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + ## Fields diff --git a/packages/mistralai_gcp/docs/models/responseformat.md b/packages/mistralai_gcp/docs/models/responseformat.md index 23a1641b..5cab22f2 100644 --- a/packages/mistralai_gcp/docs/models/responseformat.md +++ b/packages/mistralai_gcp/docs/models/responseformat.md @@ -1,9 +1,11 @@ # ResponseFormat +Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. 
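As an aside for reviewers, below is a minimal sketch of how the new `json_schema` response format described above could be exercised through the regenerated SDK. The `MistralGoogleCloud` constructor arguments, model name, and schema contents are illustrative placeholders and are not part of this patch; the field names follow the `ResponseFormat` and `JSONSchema` docs in this hunk.

```python
# Illustrative sketch only: request JSON-schema constrained output.
# Region, project, model name, and schema below are placeholder values.
from mistralai_gcp import MistralGoogleCloud, models

client = MistralGoogleCloud(region="europe-west4", project_id="my-gcp-project")

res = client.chat.complete(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Reply with the capital of France as JSON."}],
    response_format=models.ResponseFormat(
        type="json_schema",
        json_schema=models.JSONSchema(
            name="capital_answer",
            schema_definition={
                "type": "object",
                "properties": {"capital": {"type": "string"}},
                "required": ["capital"],
            },
            strict=True,
        ),
    ),
)

# The generated message is guaranteed to be JSON conforming to the schema.
print(res.choices[0].message.content)
```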
+ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | -| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformats.md b/packages/mistralai_gcp/docs/models/responseformats.md index 06886afe..2f5f1e55 100644 --- a/packages/mistralai_gcp/docs/models/responseformats.md +++ b/packages/mistralai_gcp/docs/models/responseformats.md @@ -1,7 +1,5 @@ # ResponseFormats -An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
- ## Values diff --git a/packages/mistralai_gcp/docs/models/systemmessagecontent.md b/packages/mistralai_gcp/docs/models/systemmessagecontent.md index e0d27d9f..0c87baf3 100644 --- a/packages/mistralai_gcp/docs/models/systemmessagecontent.md +++ b/packages/mistralai_gcp/docs/models/systemmessagecontent.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.TextChunk]` +### `List[models.SystemMessageContentChunks]` ```python -value: List[models.TextChunk] = /* values here */ +value: List[models.SystemMessageContentChunks] = /* values here */ ``` diff --git a/packages/mistralai_gcp/docs/models/systemmessagecontentchunks.md b/packages/mistralai_gcp/docs/models/systemmessagecontentchunks.md new file mode 100644 index 00000000..40030c17 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/systemmessagecontentchunks.md @@ -0,0 +1,17 @@ +# SystemMessageContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/thinkchunk.md b/packages/mistralai_gcp/docs/models/thinkchunk.md new file mode 100644 index 00000000..66b2e0cd --- /dev/null +++ b/packages/mistralai_gcp/docs/models/thinkchunk.md @@ -0,0 +1,10 @@ +# ThinkChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A | +| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. 
| +| `type` | [Optional[models.ThinkChunkType]](../models/thinkchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/thinkchunktype.md b/packages/mistralai_gcp/docs/models/thinkchunktype.md new file mode 100644 index 00000000..baf6f755 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/thinkchunktype.md @@ -0,0 +1,8 @@ +# ThinkChunkType + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `THINKING` | thinking | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/thinking.md b/packages/mistralai_gcp/docs/models/thinking.md new file mode 100644 index 00000000..c7a0d5c9 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/thinking.md @@ -0,0 +1,17 @@ +# Thinking + + +## Supported Types + +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/tool.md b/packages/mistralai_gcp/docs/models/tool.md index 822f86f8..fb661f72 100644 --- a/packages/mistralai_gcp/docs/models/tool.md +++ b/packages/mistralai_gcp/docs/models/tool.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolcall.md b/packages/mistralai_gcp/docs/models/toolcall.md index 574be1ea..3819236b 100644 --- a/packages/mistralai_gcp/docs/models/toolcall.md +++ b/packages/mistralai_gcp/docs/models/toolcall.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolchoice.md b/packages/mistralai_gcp/docs/models/toolchoice.md index 792ebcd6..373046bb 100644 --- a/packages/mistralai_gcp/docs/models/toolchoice.md +++ b/packages/mistralai_gcp/docs/models/toolchoice.md @@ -7,5 +7,5 @@ ToolChoice is either a ToolChoiceEnum or a ToolChoice | Field | Type | Required | Description | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| 
`function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usageinfo.md b/packages/mistralai_gcp/docs/models/usageinfo.md index 9f56a3ae..f5204ac9 100644 --- a/packages/mistralai_gcp/docs/models/usageinfo.md +++ b/packages/mistralai_gcp/docs/models/usageinfo.md @@ -3,8 +3,10 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | -| `prompt_tokens` | *int* | :heavy_check_mark: | N/A | 16 | -| `completion_tokens` | *int* | :heavy_check_mark: | N/A | 34 | -| `total_tokens` | *int* | :heavy_check_mark: | N/A | 50 | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `prompt_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `completion_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `total_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `prompt_audio_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/poetry.lock b/packages/mistralai_gcp/poetry.lock new file mode 100644 index 00000000..16fa0e0f --- /dev/null +++ b/packages/mistralai_gcp/poetry.lock @@ -0,0 +1,955 @@ +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "anyio" +version = "4.12.1" +description = "High-level concurrency and networking framework on top of asyncio or Trio" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c"}, + {file = "anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} + +[package.extras] +trio = ["trio (>=0.31.0) ; python_version < \"3.10\"", "trio (>=0.32.0) ; python_version >= \"3.10\""] + +[[package]] +name = "astroid" +version = "3.2.4" +description = "An abstract syntax tree for Python with inference support." 
+optional = false +python-versions = ">=3.8.0" +groups = ["dev"] +files = [ + {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, + {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "certifi" +version = "2026.1.4" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c"}, + {file = "certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-win32.whl", hash = 
"sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d"}, + {file = "charset_normalizer-3.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-win32.whl", hash = "sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016"}, + {file = "charset_normalizer-3.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25"}, + {file = 
"charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525"}, + {file = "charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894"}, + {file = 
"charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14"}, + {file = "charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152"}, + 
{file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c"}, + {file = "charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ce8a0633f41a967713a59c4139d29110c07e826d131a316b50ce11b1d79b4f84"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eaabd426fe94daf8fd157c32e571c85cb12e66692f15516a83a03264b08d06c3"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c4ef880e27901b6cc782f1b95f82da9313c0eb95c3af699103088fa0ac3ce9ac"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2aaba3b0819274cc41757a1da876f810a3e4d7b6eb25699253a4effef9e8e4af"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:778d2e08eda00f4256d7f672ca9fef386071c9202f5e4607920b86d7803387f2"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f155a433c2ec037d4e8df17d18922c3a0d9b3232a396690f17175d2946f0218d"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a8bf8d0f749c5757af2142fe7903a9df1d2e8aa3841559b2bad34b08d0e2bcf3"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:194f08cbb32dc406d6e1aea671a68be0823673db2832b38405deba2fb0d88f63"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:6aee717dcfead04c6eb1ce3bd29ac1e22663cdea57f943c87d1eab9a025438d7"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:cd4b7ca9984e5e7985c12bc60a6f173f3c958eae74f3ef6624bb6b26e2abbae4"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_riscv64.whl", hash = "sha256:b7cf1017d601aa35e6bb650b6ad28652c9cd78ee6caff19f3c28d03e1c80acbf"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e912091979546adf63357d7e2ccff9b44f026c075aeaf25a52d0e95ad2281074"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5cb4d72eea50c8868f5288b7f7f33ed276118325c1dfd3957089f6b519e1382a"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-win32.whl", hash = "sha256:837c2ce8c5a65a2035be9b3569c684358dfbf109fd3b6969630a87535495ceaa"}, + {file = "charset_normalizer-3.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:44c2a8734b333e0578090c4cd6b16f275e07aa6614ca8715e6c038e865e70576"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a9768c477b9d7bd54bc0c86dbaebdec6f03306675526c9927c0e8a04e8f94af9"}, + {file = 
"charset_normalizer-3.4.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bee1e43c28aa63cb16e5c14e582580546b08e535299b8b6158a7c9c768a1f3d"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fd44c878ea55ba351104cb93cc85e74916eb8fa440ca7903e57575e97394f608"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0f04b14ffe5fdc8c4933862d8306109a2c51e0704acfa35d51598eb45a1e89fc"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:cd09d08005f958f370f539f186d10aec3377d55b9eeb0d796025d4886119d76e"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4fe7859a4e3e8457458e2ff592f15ccb02f3da787fcd31e0183879c3ad4692a1"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fa09f53c465e532f4d3db095e0c55b615f010ad81803d383195b6b5ca6cbf5f3"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7fa17817dc5625de8a027cb8b26d9fefa3ea28c8253929b8d6649e705d2835b6"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:5947809c8a2417be3267efc979c47d76a079758166f7d43ef5ae8e9f92751f88"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4902828217069c3c5c71094537a8e623f5d097858ac6ca8252f7b4d10b7560f1"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:7c308f7e26e4363d79df40ca5b2be1c6ba9f02bdbccfed5abddb7859a6ce72cf"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2c9d3c380143a1fedbff95a312aa798578371eb29da42106a29019368a475318"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb01158d8b88ee68f15949894ccc6712278243d95f344770fa7593fa2d94410c"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-win32.whl", hash = "sha256:2677acec1a2f8ef614c6888b5b4ae4060cc184174a938ed4e8ef690e15d3e505"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:f8e160feb2aed042cd657a72acc0b481212ed28b1b9a95c0cee1621b524e1966"}, + {file = "charset_normalizer-3.4.4-cp39-cp39-win_arm64.whl", hash = "sha256:b5d84d37db046c5ca74ee7bb47dd6cbc13f80665fdde3e8040bdd3fb015ecb50"}, + {file = "charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f"}, + {file = "charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["dev"] +markers = "sys_platform == \"win32\"" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "dill" +version = "0.4.0" +description = "serialize all of Python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "dill-0.4.0-py3-none-any.whl", hash = "sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049"}, + {file = "dill-0.4.0.tar.gz", hash = "sha256:0633f1d2df477324f53a895b02c901fb961bdbf65a17122586ea7019292cbcf0"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] + +[[package]] +name = "exceptiongroup" +version = "1.3.1" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +markers = "python_version < \"3.11\"" +files = [ + {file = "exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598"}, + {file = "exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""} + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "google-auth" +version = "2.47.0" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "google_auth-2.47.0-py3-none-any.whl", hash = "sha256:c516d68336bfde7cf0da26aab674a36fedcf04b37ac4edd59c597178760c3498"}, + {file = "google_auth-2.47.0.tar.gz", hash = "sha256:833229070a9dfee1a353ae9877dcd2dec069a8281a4e72e72f77d4a70ff945da"}, +] + +[package.dependencies] +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0)", "requests (>=2.20.0,<3.0.0)"] +cryptography = ["cryptography (>=38.0.3)"] +enterprise-cert = ["cryptography", "pyopenssl"] +pyjwt = ["cryptography (>=38.0.3)", "pyjwt (>=2.0)"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0)"] +testing = ["aiohttp (<3.10.0)", "aiohttp (>=3.6.2,<4.0.0)", "aioresponses", "cryptography (>=38.0.3)", "cryptography (>=38.0.3)", "flask", "freezegun", "grpcio", "oauth2client", "packaging", "pyjwt (>=2.0)", "pyopenssl (<24.3.0)", "pyopenssl (>=20.0.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-localserver", "pyu2f (>=0.1.5)", "requests (>=2.20.0,<3.0.0)", "responses", "urllib3"] +urllib3 = ["packaging", "urllib3"] + +[[package]] +name = "h11" +version = "0.16.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, + {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +description = "A minimal low-level HTTP client." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, + {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.16" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<1.0)"] + +[[package]] +name = "httpx" +version = "0.28.1" +description = "The next generation HTTP client." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" + +[package.extras] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "idna" +version = "3.11" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea"}, + {file = "idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "iniconfig" +version = "2.1.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.11\"" +files = [ + {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, + {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, +] + +[[package]] +name = "iniconfig" +version = "2.3.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.10" +groups = ["dev"] +markers = "python_version >= \"3.11\"" +files = [ + {file = "iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12"}, + {file = "iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730"}, +] + +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." 
+optional = false +python-versions = ">=3.8.0" +groups = ["dev"] +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +groups = ["dev"] +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "mypy" +version = "1.15.0" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13"}, + {file = "mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559"}, + {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b"}, + {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3"}, + {file = "mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b"}, + {file = "mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828"}, + {file = "mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f"}, + {file = "mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5"}, + {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e"}, + {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c"}, + {file = "mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f"}, + {file = "mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f"}, + {file = "mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd"}, + {file = "mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f"}, + {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464"}, + {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee"}, + {file = 
"mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e"}, + {file = "mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22"}, + {file = "mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445"}, + {file = "mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d"}, + {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5"}, + {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036"}, + {file = "mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357"}, + {file = "mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf"}, + {file = "mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078"}, + {file = "mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba"}, + {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5"}, + {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b"}, + {file = "mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2"}, + {file = "mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980"}, + {file = "mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e"}, + {file = "mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43"}, +] + +[package.dependencies] +mypy_extensions = ">=1.0.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing_extensions = ">=4.6.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, + {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, +] + +[[package]] +name = "packaging" +version = "25.0" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, +] + +[[package]] +name = "platformdirs" +version = "4.4.0" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version < \"3.11\"" +files = [ + {file = "platformdirs-4.4.0-py3-none-any.whl", hash = "sha256:abd01743f24e5287cd7a5db3752faf1a2d65353f38ec26d98e25a6db65958c85"}, + {file = "platformdirs-4.4.0.tar.gz", hash = "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.14.1)"] + +[[package]] +name = "platformdirs" +version = "4.5.1" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +optional = false +python-versions = ">=3.10" +groups = ["dev"] +markers = "python_version >= \"3.11\"" +files = [ + {file = "platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31"}, + {file = "platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda"}, +] + +[package.extras] +docs = ["furo (>=2025.9.25)", "proselint (>=0.14)", "sphinx (>=8.2.3)", "sphinx-autodoc-typehints (>=3.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.4.2)", "pytest-cov (>=7)", "pytest-mock (>=3.15.1)"] +type = ["mypy (>=1.18.2)"] + +[[package]] +name = "pluggy" +version = "1.6.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["coverage", "pytest", "pytest-benchmark"] + +[[package]] +name = "pyasn1" +version = "0.6.1" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions 
= ">=3.8" +groups = ["main"] +files = [ + {file = "pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a"}, + {file = "pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6"}, +] + +[package.dependencies] +pyasn1 = ">=0.6.1,<0.7.0" + +[[package]] +name = "pydantic" +version = "2.12.5" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d"}, + {file = "pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.41.5" +typing-extensions = ">=4.14.1" +typing-inspection = ">=0.4.2" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146"}, + {file = "pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49"}, + {file = "pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba"}, + {file = "pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9"}, + {file = "pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6"}, + {file = 
"pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f"}, + {file = "pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7"}, + {file = "pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3"}, + {file = "pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9"}, + {file = "pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd"}, + {file = "pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a"}, + {file = "pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1"}, + {file = 
"pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = 
"sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008"}, + {file = "pydantic_core-2.41.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8bfeaf8735be79f225f3fefab7f941c712aaca36f1128c9d7e2352ee1aa87bdf"}, + {file = "pydantic_core-2.41.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:346285d28e4c8017da95144c7f3acd42740d637ff41946af5ce6e5e420502dd5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75dafbf87d6276ddc5b2bf6fae5254e3d0876b626eb24969a574fff9149ee5d"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b93a4d08587e2b7e7882de461e82b6ed76d9026ce91ca7915e740ecc7855f60"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8465ab91a4bd96d36dde3263f06caa6a8a6019e4113f24dc753d79a8b3a3f82"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:299e0a22e7ae2b85c1a57f104538b2656e8ab1873511fd718a1c1c6f149b77b5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:707625ef0983fcfb461acfaf14de2067c5942c6bb0f3b4c99158bed6fedd3cf3"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f41eb9797986d6ebac5e8edff36d5cef9de40def462311b3eb3eeded1431e425"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0384e2e1021894b1ff5a786dbf94771e2986ebe2869533874d7e43bc79c6f504"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:f0cd744688278965817fd0839c4a4116add48d23890d468bc436f78beb28abf5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:753e230374206729bf0a807954bcc6c150d3743928a73faffee51ac6557a03c3"}, + {file = "pydantic_core-2.41.5-cp39-cp39-win32.whl", hash = "sha256:873e0d5b4fb9b89ef7c2d2a963ea7d02879d9da0da8d9d4933dee8ee86a8b460"}, + {file = "pydantic_core-2.41.5-cp39-cp39-win_amd64.whl", hash = "sha256:e4f4a984405e91527a0d62649ee21138f8e3d0ef103be488c1dc11a80d7f184b"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd"}, + {file = 
"pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51"}, + {file = "pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e"}, +] + +[package.dependencies] +typing-extensions = ">=4.14.1" + +[[package]] +name = "pygments" +version = "2.19.2" +description = "Pygments is a syntax highlighting package written in Python." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, + {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, +] + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pylint" +version = "3.2.3" +description = "python code static checker" +optional = false +python-versions = ">=3.8.0" +groups = ["dev"] +files = [ + {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, + {file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, +] + +[package.dependencies] +astroid = ">=3.2.2,<=3.3.0-dev0" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +dill = [ + {version = ">=0.2", markers = "python_version < \"3.11\""}, + {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, + {version = ">=0.3.6", markers = "python_version == \"3.11\""}, +] +isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" +mccabe = ">=0.6,<0.8" +platformdirs = ">=2.2.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +tomlkit = ">=0.10.1" +typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} + +[package.extras] +spelling = ["pyenchant (>=3.2,<4.0)"] +testutils = ["gitpython (>3)"] + +[[package]] +name = "pytest" +version = "8.4.2" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79"}, + {file = "pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01"}, +] + +[package.dependencies] +colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1", markers = "python_version < \"3.11\""} +iniconfig = ">=1" +packaging = ">=20" +pluggy = ">=1.5,<2" +pygments = ">=2.7.2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.23.8" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, + {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, +] + +[package.dependencies] +pytest = ">=7.0.0,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + +[[package]] +name = "requests" +version = "2.32.5" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"}, + {file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset_normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rsa" +version = "4.2" +description = "Pure-Python RSA implementation" +optional = false +python-versions = "*" +groups = ["main"] +markers = "python_version >= \"3.12\"" +files = [ + {file = "rsa-4.2.tar.gz", hash = "sha256:aaefa4b84752e3e99bd8333a2e1e3e7a7da64614042bd66f775573424370108a"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "rsa" +version = "4.9.1" +description = "Pure-Python RSA implementation" +optional = false +python-versions = "<4,>=3.6" +groups = ["main"] +markers = "python_version <= \"3.11\"" +files = [ + {file = "rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762"}, + {file = "rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "tomli" +version = "2.3.0" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.11\"" +files = [ + {file = "tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45"}, + {file = "tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba"}, + {file = "tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf"}, + {file = "tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441"}, + {file = "tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845"}, + {file = "tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c"}, + {file = "tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456"}, + {file = "tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be"}, + {file = "tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac"}, + {file = "tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22"}, + {file = "tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f"}, + {file = "tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52"}, + {file = 
"tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8"}, + {file = "tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6"}, + {file = "tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876"}, + {file = "tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878"}, + {file = "tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b"}, + {file = "tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae"}, + {file = "tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b"}, + {file = "tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf"}, + {file = "tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f"}, + {file = "tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05"}, + {file = "tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606"}, + {file = "tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999"}, + {file = "tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e"}, + {file = "tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3"}, + {file = "tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc"}, + {file = "tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0"}, + {file = "tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879"}, + {file = "tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005"}, + {file = "tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463"}, + {file = "tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8"}, + {file = "tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77"}, + {file = "tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf"}, + {file = "tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530"}, + {file = 
"tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b"}, + {file = "tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67"}, + {file = "tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f"}, + {file = "tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0"}, + {file = "tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba"}, + {file = "tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b"}, + {file = "tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549"}, +] + +[[package]] +name = "tomlkit" +version = "0.13.3" +description = "Style preserving TOML library" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0"}, + {file = "tomlkit-0.13.3.tar.gz", hash = "sha256:430cf247ee57df2b94ee3fbe588e71d362a941ebb545dec29b53961d61add2a1"}, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +description = "Backported and Experimental Type Hints for Python 3.9+" +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548"}, + {file = "typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466"}, +] + +[[package]] +name = "typing-inspection" +version = "0.4.2" +description = "Runtime typing introspection tools" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7"}, + {file = "typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464"}, +] + +[package.dependencies] +typing-extensions = ">=4.12.0" + +[[package]] +name = "urllib3" +version = "2.6.3" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4"}, + {file = "urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed"}, +] + +[package.extras] +brotli = ["brotli (>=1.2.0) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=1.2.0.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["backports-zstd (>=1.0.0) ; python_version < \"3.14\""] + +[metadata] +lock-version = "2.1" +python-versions = ">=3.9.2" +content-hash = "a9b15a9656c8560205927f8d6944fc53ed3be64e14a2349fb13b5f488b4f6c32" diff --git a/packages/mistralai_gcp/poetry.toml b/packages/mistralai_gcp/poetry.toml new file mode 100644 index 00000000..cd3492ac --- /dev/null +++ b/packages/mistralai_gcp/poetry.toml @@ -0,0 +1,3 @@ + +[virtualenvs] +in-project = true diff --git a/packages/mistralai_gcp/pylintrc b/packages/mistralai_gcp/pylintrc index c80721af..a8fcb932 100644 --- a/packages/mistralai_gcp/pylintrc +++ b/packages/mistralai_gcp/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.10 +py-version=3.9 # Discover python modules and packages in the file system subtree. recursive=no @@ -188,8 +188,8 @@ good-names=i, Run, _, e, - n, - id + id, + n # Good variable names regexes, separated by a comma. If names match any regex, # they will always be accepted @@ -458,7 +458,8 @@ disable=raw-checker-failed, relative-beyond-top-level, consider-using-with, wildcard-import, - unused-wildcard-import + unused-wildcard-import, + too-many-return-statements # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option @@ -659,4 +660,4 @@ init-import=no # List of qualified module names which can have objects that can redefine # builtins. -redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io \ No newline at end of file diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 79b8193b..cb54d966 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,57 +1,51 @@ + [project] name = "mistralai-gcp" -version = "1.6.0" +version = "2.0.0" description = "Python Client SDK for the Mistral AI API in GCP." 
-authors = [{ name = "Mistral" }] -requires-python = ">=3.10" -readme = "README.md" +authors = [{ name = "Mistral" },] +readme = "README-PYPI.md" +requires-python = ">=3.9.2" dependencies = [ - "eval-type-backport >=0.2.0", "google-auth (>=2.31.0,<3.0.0)", + "httpcore >=1.0.9", "httpx >=0.28.1", - "pydantic >=2.10.3", - "python-dateutil >=2.8.2", + "pydantic >=2.11.2", "requests (>=2.32.3,<3.0.0)", - "typing-inspection >=0.4.0", ] -[dependency-groups] -dev = [ - "mypy==1.14.1", - "pylint==3.2.3", - "pytest>=8.2.2,<9", - "pytest-asyncio>=0.23.7,<0.24", - "types-python-dateutil>=2.9.0.20240316,<3", +[tool.poetry] +repository = "https://github.com/mistralai/client-python.git" +packages = [ + { include = "mistralai_gcp", from = "src" } ] +include = ["py.typed", "src/mistralai_gcp/py.typed"] [tool.setuptools.package-data] "*" = ["py.typed", "src/mistralai_gcp/py.typed"] -[tool.hatch.build.targets.sdist] -include = ["src/mistralai_gcp"] - -[tool.hatch.build.targets.sdist.force-include] -"py.typed" = "py.typed" -"src/mistralai_gcp/py.typed" = "src/mistralai_gcp/py.typed" - -[tool.hatch.build.targets.wheel] -include = ["src/mistralai_gcp"] - -[tool.hatch.build.targets.wheel.sources] -"src/mistralai_gcp" = "mistralai_gcp" - [virtualenvs] in-project = true +[tool.poetry.group.dev.dependencies] +mypy = "==1.15.0" +pylint = "==3.2.3" +pytest = "^8.2.2" +pytest-asyncio = "^0.23.7" + [build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] +asyncio_default_fixture_loop_scope = "function" +asyncio_mode = "auto" pythonpath = ["src"] [tool.mypy] disable_error_code = "misc" +explicit_package_bases = true +mypy_path = "src" [[tool.mypy.overrides]] module = "typing_inspect" @@ -64,3 +58,5 @@ ignore_missing_imports = true [tool.pyright] venvPath = "." venv = ".venv" + + diff --git a/packages/mistralai_gcp/scripts/prepare_readme.py b/packages/mistralai_gcp/scripts/prepare_readme.py new file mode 100644 index 00000000..6c4b9932 --- /dev/null +++ b/packages/mistralai_gcp/scripts/prepare_readme.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import re +import shutil + +try: + with open("README.md", "r", encoding="utf-8") as rh: + readme_contents = rh.read() + GITHUB_URL = "https://github.com/mistralai/client-python.git" + GITHUB_URL = ( + GITHUB_URL[: -len(".git")] if GITHUB_URL.endswith(".git") else GITHUB_URL + ) + REPO_SUBDIR = "packages/mistralai_gcp" + # Ensure the subdirectory has a trailing slash + if not REPO_SUBDIR.endswith("/"): + REPO_SUBDIR += "/" + # links on PyPI should have absolute URLs + readme_contents = re.sub( + r"(\[[^\]]+\]\()((?!https?:)[^\)]+)(\))", + lambda m: m.group(1) + + GITHUB_URL + + "/blob/master/" + + REPO_SUBDIR + + m.group(2) + + m.group(3), + readme_contents, + ) + + with open("README-PYPI.md", "w", encoding="utf-8") as wh: + wh.write(readme_contents) +except Exception as e: + try: + print("Failed to rewrite README.md to README-PYPI.md, copying original instead") + print(e) + shutil.copyfile("README.md", "README-PYPI.md") + except Exception as ie: + print("Failed to copy README.md to README-PYPI.md") + print(ie) diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/mistralai_gcp/scripts/publish.sh index e9eb1f0b..2a3ead70 100755 --- a/packages/mistralai_gcp/scripts/publish.sh +++ b/packages/mistralai_gcp/scripts/publish.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash +export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -export UV_PUBLISH_TOKEN=${PYPI_TOKEN} +poetry run python scripts/prepare_readme.py -uv run python ../../scripts/prepare_readme.py --repo-subdir packages/mistralai_gcp -- uv build -uv publish +poetry publish --build --skip-existing diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py index bb867b5b..f8088f4c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py @@ -3,10 +3,12 @@ from abc import ABC, abstractmethod import httpx from mistralai_gcp.httpclient import HttpClient +from mistralai_gcp.sdkconfiguration import SDKConfiguration from typing import Any, Callable, List, Optional, Tuple, Union class HookContext: + config: SDKConfiguration base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None @@ -14,11 +16,13 @@ class HookContext: def __init__( self, + config: SDKConfiguration, base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): + self.config = config self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes @@ -28,6 +32,7 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, @@ -38,6 +43,7 @@ def __init__(self, hook_ctx: HookContext): class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, @@ -48,6 +54,7 @@ def __init__(self, hook_ctx: HookContext): class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/mistralai_gcp/src/mistralai_gcp/_version.py index 11f38b63..5ba9806b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_version.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_version.py @@ 
-3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-gcp" -__version__: str = "1.6.0" -__openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai-gcp" +__version__: str = "2.0.0" +__openapi_doc_version__: str = "1.0.0" +__gen_version__: str = "2.687.13" +__user_agent__: str = "speakeasy-sdk/python 2.0.0 2.687.13 1.0.0 mistralai-gcp" try: if __package__ is not None: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py index bb0aab96..f22e2346 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py @@ -15,9 +15,19 @@ class BaseSDK: sdk_configuration: SDKConfiguration + parent_ref: Optional[object] = None + """ + Reference to the root SDK instance, if any. This will prevent it from + being garbage collected while there are active streams. + """ - def __init__(self, sdk_config: SDKConfiguration) -> None: + def __init__( + self, + sdk_config: SDKConfiguration, + parent_ref: Optional[object] = None, + ) -> None: self.sdk_configuration = sdk_config + self.parent_ref = parent_ref def _get_url(self, base_url, url_variables): sdk_url, sdk_variables = self.sdk_configuration.get_server_details() @@ -218,12 +228,12 @@ def do_request( client = self.sdk_configuration.client logger = self.sdk_configuration.debug_logger + hooks = self.sdk_configuration.__dict__["_hooks"] + def do(): http_res = None try: - req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -237,16 +247,14 @@ def do(): http_res = client.send(req, stream=stream) except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) if e is not None: logger.debug("Request Exception", exc_info=True) raise e if http_res is None: logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") + raise models.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -257,7 +265,7 @@ def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( + result, err = hooks.after_error( AfterErrorContext(hook_ctx), http_res, None ) if err is not None: @@ -267,7 +275,7 @@ def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") + raise models.SDKError("Unexpected error occurred", http_res) return http_res @@ -277,9 +285,7 @@ def do(): http_res = do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) return http_res @@ -294,12 +300,12 @@ async def do_request_async( client = self.sdk_configuration.async_client logger = self.sdk_configuration.debug_logger + hooks = self.sdk_configuration.__dict__["_hooks"] + async def do(): http_res = None try: - req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) + req = 
hooks.before_request(BeforeRequestContext(hook_ctx), request) logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -313,16 +319,14 @@ async def do(): http_res = await client.send(req, stream=stream) except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) if e is not None: logger.debug("Request Exception", exc_info=True) raise e if http_res is None: logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") + raise models.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -333,7 +337,7 @@ async def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( + result, err = hooks.after_error( AfterErrorContext(hook_ctx), http_res, None ) if err is not None: @@ -343,7 +347,7 @@ async def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") + raise models.SDKError("Unexpected error occurred", http_res) return http_res @@ -355,8 +359,6 @@ async def do(): http_res = await do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) return http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index dba369bf..57b94eaf 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -5,6 +5,7 @@ from mistralai_gcp._hooks import HookContext from mistralai_gcp.types import OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming +from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, List, Mapping, Optional, Union @@ -41,6 +42,7 @@ def stream( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -58,14 +60,15 @@ def stream( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -104,6 +107,7 @@ def stream( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, ) req = self._build_request( @@ -135,6 +139,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], @@ -152,32 +157,23 @@ def stream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -209,6 +205,7 @@ async def stream_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -226,14 +223,15 @@ async def stream_async( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -272,6 +270,7 @@ async def stream_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, ) req = self._build_request_async( @@ -303,6 +302,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], @@ -320,32 +320,23 @@ async def stream_async( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) def complete( self, @@ -385,6 +376,7 @@ def complete( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -400,14 +392,15 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
- :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -448,6 +441,7 @@ def complete( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, ) req = self._build_request( @@ -479,6 +473,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], @@ -491,33 +486,22 @@ def complete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] + return unmarshal_json_response( + Optional[models.ChatCompletionResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -557,6 +541,7 @@ async def complete_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -572,14 +557,15 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
- :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -620,6 +606,7 @@ async def complete_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, ) req = self._build_request_async( @@ -651,6 +638,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], @@ -663,30 +651,19 @@ async def complete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] + return unmarshal_json_response( + Optional[models.ChatCompletionResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py index 84821c6a..5909bf69 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -5,6 +5,7 @@ from mistralai_gcp._hooks import HookContext from mistralai_gcp.types import OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming +from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Mapping, Optional, Union @@ -38,7 +39,7 @@ def stream( Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. 
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -105,6 +106,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], @@ -122,32 +124,23 @@ def stream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -176,7 +169,7 @@ async def stream_async( Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. @@ -243,6 +236,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], @@ -260,32 +254,23 @@ async def stream_async( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) def complete( self, @@ -314,7 +299,7 @@ def complete( FIM completion. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 
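Every raise in the hunks above now uses the two- or three-argument forms `SDKError(message, http_res, body)` and `HTTPValidationError(data, http_res, body)`, attaching the response to the exception rather than passing a bare status code. A hedged sketch of what that looks like from calling code follows; only `err.data` is confirmed by this excerpt, and the placeholder values are not taken from the patch.

```python
from mistralai_gcp import MistralGoogleCloud, models

client = MistralGoogleCloud(region="europe-west4", project_id="my-project")  # placeholders

try:
    res = client.fim.complete(model="codestral-latest", prompt="def fibonacci(n):")
    # created/choices become required on the response models later in this patch,
    # so the happy path no longer needs a None check on choices.
    if res is not None:
        print(res.choices[0].message.content)
except models.HTTPValidationError as err:
    # 422s still expose the parsed validation payload on `data`.
    print(err.data.detail)
except models.SDKError as err:
    # Generic 4XX/5XX and unexpected responses.
    print(str(err))
```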
@@ -381,6 +366,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], @@ -393,33 +379,22 @@ def complete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.FIMCompletionResponse] + return unmarshal_json_response( + Optional[models.FIMCompletionResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -448,7 +423,7 @@ async def complete_async( FIM completion. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 
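The repeated `unmarshal_json_response(...)` calls in these hunks come from the new `mistralai_gcp.utils.unmarshal_json_response` module imported at the top of `fim.py`; its body is not part of this excerpt. Inferring only from the call sites, `(type, http_res)` for buffered responses and `(type, http_res, body)` when the text was already drained from a stream, a plausible contract is sketched below. The real helper presumably also wraps parse failures in the new `ResponseValidationError`, which is not reproduced here.

```python
# Sketch inferred from the call sites above -- not the code added by this patch.
from typing import Any, Optional

import httpx

from mistralai_gcp import utils


def unmarshal_json_response(
    typ: Any, http_res: httpx.Response, body: Optional[str] = None
) -> Any:
    # Streaming call sites pass the already-read text as `body`;
    # buffered call sites let the helper read http_res.text itself.
    raw = body if body is not None else http_res.text
    return utils.unmarshal_json(raw, typ)
```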
@@ -515,6 +490,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], @@ -527,30 +503,19 @@ async def complete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.FIMCompletionResponse] + return unmarshal_json_response( + Optional[models.FIMCompletionResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py index 1e426352..47b052cb 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py @@ -2,7 +2,6 @@ # pyright: reportReturnType = false import asyncio -from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -116,21 +115,12 @@ def close_clients( pass if async_client is not None and not async_client_supplied: - is_async = False try: - asyncio.get_running_loop() - is_async = True + loop = asyncio.get_running_loop() + asyncio.run_coroutine_threadsafe(async_client.aclose(), loop) except RuntimeError: - pass - - try: - # If this function is called in an async loop then start another - # loop in a separate thread to close the async http client. - if is_async: - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(asyncio.run, async_client.aclose()) - future.result() - else: + try: asyncio.run(async_client.aclose()) - except Exception: - pass + except RuntimeError: + # best effort + pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py index 752e70e6..fe85b133 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py @@ -1,122 +1,154 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from .assistantmessage import ( - AssistantMessage, - AssistantMessageContent, - AssistantMessageContentTypedDict, - AssistantMessageRole, - AssistantMessageTypedDict, -) -from .chatcompletionchoice import ( - ChatCompletionChoice, - ChatCompletionChoiceFinishReason, - ChatCompletionChoiceTypedDict, -) -from .chatcompletionrequest import ( - ChatCompletionRequest, - ChatCompletionRequestMessages, - ChatCompletionRequestMessagesTypedDict, - ChatCompletionRequestStop, - ChatCompletionRequestStopTypedDict, - ChatCompletionRequestToolChoice, - ChatCompletionRequestToolChoiceTypedDict, - ChatCompletionRequestTypedDict, -) -from .chatcompletionresponse import ( - ChatCompletionResponse, - ChatCompletionResponseTypedDict, -) -from .chatcompletionstreamrequest import ( - ChatCompletionStreamRequest, - ChatCompletionStreamRequestToolChoice, - ChatCompletionStreamRequestToolChoiceTypedDict, - ChatCompletionStreamRequestTypedDict, - Messages, - MessagesTypedDict, - Stop, - StopTypedDict, -) -from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from .completionevent import CompletionEvent, CompletionEventTypedDict -from .completionresponsestreamchoice import ( - CompletionResponseStreamChoice, - CompletionResponseStreamChoiceTypedDict, - FinishReason, -) -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict -from .fimcompletionrequest import ( - FIMCompletionRequest, - FIMCompletionRequestStop, - FIMCompletionRequestStopTypedDict, - FIMCompletionRequestTypedDict, -) -from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict -from .fimcompletionstreamrequest import ( - FIMCompletionStreamRequest, - FIMCompletionStreamRequestStop, - FIMCompletionStreamRequestStopTypedDict, - FIMCompletionStreamRequestTypedDict, -) -from .function import Function, FunctionTypedDict -from .functioncall import ( - Arguments, - ArgumentsTypedDict, - FunctionCall, - FunctionCallTypedDict, -) -from .functionname import FunctionName, FunctionNameTypedDict -from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData -from .imageurl import ImageURL, ImageURLTypedDict -from .imageurlchunk import ( - ImageURLChunk, - ImageURLChunkImageURL, - ImageURLChunkImageURLTypedDict, - ImageURLChunkType, - ImageURLChunkTypedDict, -) -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .prediction import Prediction, PredictionTypedDict -from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .responseformats import ResponseFormats -from .sdkerror import SDKError -from .security import Security, SecurityTypedDict -from .systemmessage import ( - Role, - SystemMessage, - SystemMessageContent, - SystemMessageContentTypedDict, - SystemMessageTypedDict, -) -from .textchunk import TextChunk, TextChunkTypedDict, Type -from .tool import Tool, ToolTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ( - ToolMessage, - ToolMessageContent, - ToolMessageContentTypedDict, - ToolMessageRole, - ToolMessageTypedDict, -) -from .tooltypes import ToolTypes -from .usageinfo import UsageInfo, UsageInfoTypedDict -from .usermessage import ( - UserMessage, - UserMessageContent, - UserMessageContentTypedDict, - UserMessageRole, - 
UserMessageTypedDict, -) -from .validationerror import ( - Loc, - LocTypedDict, - ValidationError, - ValidationErrorTypedDict, -) +from .mistralgcperror import MistralGcpError +from typing import TYPE_CHECKING +from importlib import import_module +import builtins +import sys +if TYPE_CHECKING: + from .assistantmessage import ( + AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, + AssistantMessageRole, + AssistantMessageTypedDict, + ) + from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceFinishReason, + ChatCompletionChoiceTypedDict, + ) + from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestMessages, + ChatCompletionRequestMessagesTypedDict, + ChatCompletionRequestStop, + ChatCompletionRequestStopTypedDict, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, + ) + from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, + ) + from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + Messages, + MessagesTypedDict, + Stop, + StopTypedDict, + ) + from .completionchunk import CompletionChunk, CompletionChunkTypedDict + from .completionevent import CompletionEvent, CompletionEventTypedDict + from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, + FinishReason, + ) + from .contentchunk import ContentChunk, ContentChunkTypedDict + from .deltamessage import ( + Content, + ContentTypedDict, + DeltaMessage, + DeltaMessageTypedDict, + ) + from .fimcompletionrequest import ( + FIMCompletionRequest, + FIMCompletionRequestStop, + FIMCompletionRequestStopTypedDict, + FIMCompletionRequestTypedDict, + ) + from .fimcompletionresponse import ( + FIMCompletionResponse, + FIMCompletionResponseTypedDict, + ) + from .fimcompletionstreamrequest import ( + FIMCompletionStreamRequest, + FIMCompletionStreamRequestStop, + FIMCompletionStreamRequestStopTypedDict, + FIMCompletionStreamRequestTypedDict, + ) + from .function import Function, FunctionTypedDict + from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, + ) + from .functionname import FunctionName, FunctionNameTypedDict + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .imageurl import ImageURL, ImageURLTypedDict + from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkImageURL, + ImageURLChunkImageURLTypedDict, + ImageURLChunkType, + ImageURLChunkTypedDict, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .mistralpromptmode import MistralPromptMode + from .no_response_error import NoResponseError + from .prediction import Prediction, PredictionTypedDict + from .referencechunk import ( + ReferenceChunk, + ReferenceChunkType, + ReferenceChunkTypedDict, + ) + from .responseformat import ResponseFormat, ResponseFormatTypedDict + from .responseformats import ResponseFormats + from .responsevalidationerror import ResponseValidationError + from .sdkerror import SDKError + from .security import Security, SecurityTypedDict + from .systemmessage import ( + Role, + SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, + SystemMessageTypedDict, + ) + from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + 
SystemMessageContentChunksTypedDict, + ) + from .textchunk import TextChunk, TextChunkTypedDict, Type + from .thinkchunk import ( + ThinkChunk, + ThinkChunkType, + ThinkChunkTypedDict, + Thinking, + ThinkingTypedDict, + ) + from .tool import Tool, ToolTypedDict + from .toolcall import ToolCall, ToolCallTypedDict + from .toolchoice import ToolChoice, ToolChoiceTypedDict + from .toolchoiceenum import ToolChoiceEnum + from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + ToolMessageRole, + ToolMessageTypedDict, + ) + from .tooltypes import ToolTypes + from .usageinfo import UsageInfo, UsageInfoTypedDict + from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageRole, + UserMessageTypedDict, + ) + from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, + ) __all__ = [ "Arguments", @@ -187,6 +219,9 @@ "LocTypedDict", "Messages", "MessagesTypedDict", + "MistralGcpError", + "MistralPromptMode", + "NoResponseError", "Prediction", "PredictionTypedDict", "ReferenceChunk", @@ -195,6 +230,7 @@ "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", + "ResponseValidationError", "Role", "SDKError", "Security", @@ -203,10 +239,17 @@ "StopTypedDict", "SystemMessage", "SystemMessageContent", + "SystemMessageContentChunks", + "SystemMessageContentChunksTypedDict", "SystemMessageContentTypedDict", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", + "ThinkChunk", + "ThinkChunkType", + "ThinkChunkTypedDict", + "Thinking", + "ThinkingTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", @@ -231,3 +274,165 @@ "ValidationError", "ValidationErrorTypedDict", ] + +_dynamic_imports: dict[str, str] = { + "AssistantMessage": ".assistantmessage", + "AssistantMessageContent": ".assistantmessage", + "AssistantMessageContentTypedDict": ".assistantmessage", + "AssistantMessageRole": ".assistantmessage", + "AssistantMessageTypedDict": ".assistantmessage", + "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", + "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", + "ChatCompletionRequest": ".chatcompletionrequest", + "ChatCompletionRequestMessages": ".chatcompletionrequest", + "ChatCompletionRequestMessagesTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestStop": ".chatcompletionrequest", + "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestToolChoice": ".chatcompletionrequest", + "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestTypedDict": ".chatcompletionrequest", + "ChatCompletionResponse": ".chatcompletionresponse", + "ChatCompletionResponseTypedDict": ".chatcompletionresponse", + "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", + "Messages": ".chatcompletionstreamrequest", + "MessagesTypedDict": ".chatcompletionstreamrequest", + "Stop": ".chatcompletionstreamrequest", + "StopTypedDict": ".chatcompletionstreamrequest", + "CompletionChunk": ".completionchunk", + "CompletionChunkTypedDict": ".completionchunk", + "CompletionEvent": ".completionevent", + "CompletionEventTypedDict": ".completionevent", + "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + 
"CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", + "FinishReason": ".completionresponsestreamchoice", + "ContentChunk": ".contentchunk", + "ContentChunkTypedDict": ".contentchunk", + "Content": ".deltamessage", + "ContentTypedDict": ".deltamessage", + "DeltaMessage": ".deltamessage", + "DeltaMessageTypedDict": ".deltamessage", + "FIMCompletionRequest": ".fimcompletionrequest", + "FIMCompletionRequestStop": ".fimcompletionrequest", + "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", + "FIMCompletionRequestTypedDict": ".fimcompletionrequest", + "FIMCompletionResponse": ".fimcompletionresponse", + "FIMCompletionResponseTypedDict": ".fimcompletionresponse", + "FIMCompletionStreamRequest": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStop": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest", + "Function": ".function", + "FunctionTypedDict": ".function", + "Arguments": ".functioncall", + "ArgumentsTypedDict": ".functioncall", + "FunctionCall": ".functioncall", + "FunctionCallTypedDict": ".functioncall", + "FunctionName": ".functionname", + "FunctionNameTypedDict": ".functionname", + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "ImageURL": ".imageurl", + "ImageURLTypedDict": ".imageurl", + "ImageURLChunk": ".imageurlchunk", + "ImageURLChunkImageURL": ".imageurlchunk", + "ImageURLChunkImageURLTypedDict": ".imageurlchunk", + "ImageURLChunkType": ".imageurlchunk", + "ImageURLChunkTypedDict": ".imageurlchunk", + "JSONSchema": ".jsonschema", + "JSONSchemaTypedDict": ".jsonschema", + "MistralPromptMode": ".mistralpromptmode", + "NoResponseError": ".no_response_error", + "Prediction": ".prediction", + "PredictionTypedDict": ".prediction", + "ReferenceChunk": ".referencechunk", + "ReferenceChunkType": ".referencechunk", + "ReferenceChunkTypedDict": ".referencechunk", + "ResponseFormat": ".responseformat", + "ResponseFormatTypedDict": ".responseformat", + "ResponseFormats": ".responseformats", + "ResponseValidationError": ".responsevalidationerror", + "SDKError": ".sdkerror", + "Security": ".security", + "SecurityTypedDict": ".security", + "Role": ".systemmessage", + "SystemMessage": ".systemmessage", + "SystemMessageContent": ".systemmessage", + "SystemMessageContentTypedDict": ".systemmessage", + "SystemMessageTypedDict": ".systemmessage", + "SystemMessageContentChunks": ".systemmessagecontentchunks", + "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", + "TextChunk": ".textchunk", + "TextChunkTypedDict": ".textchunk", + "Type": ".textchunk", + "ThinkChunk": ".thinkchunk", + "ThinkChunkType": ".thinkchunk", + "ThinkChunkTypedDict": ".thinkchunk", + "Thinking": ".thinkchunk", + "ThinkingTypedDict": ".thinkchunk", + "Tool": ".tool", + "ToolTypedDict": ".tool", + "ToolCall": ".toolcall", + "ToolCallTypedDict": ".toolcall", + "ToolChoice": ".toolchoice", + "ToolChoiceTypedDict": ".toolchoice", + "ToolChoiceEnum": ".toolchoiceenum", + "ToolMessage": ".toolmessage", + "ToolMessageContent": ".toolmessage", + "ToolMessageContentTypedDict": ".toolmessage", + "ToolMessageRole": ".toolmessage", + "ToolMessageTypedDict": ".toolmessage", + "ToolTypes": ".tooltypes", + "UsageInfo": ".usageinfo", + "UsageInfoTypedDict": ".usageinfo", + "UserMessage": ".usermessage", + "UserMessageContent": ".usermessage", + "UserMessageContentTypedDict": ".usermessage", + 
"UserMessageRole": ".usermessage", + "UserMessageTypedDict": ".usermessage", + "Loc": ".validationerror", + "LocTypedDict": ".validationerror", + "ValidationError": ".validationerror", + "ValidationErrorTypedDict": ".validationerror", +} + + +def dynamic_import(modname, retries=3): + for attempt in range(retries): + try: + return import_module(modname, __package__) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " + ) + + try: + module = dynamic_import(module_name) + result = getattr(module, attr_name) + return result + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = builtins.list(_dynamic_imports.keys()) + return builtins.sorted(lazy_attrs) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py index 9147f566..794b8c80 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -56,7 +56,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index a0125c35..d693e3c3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -17,8 +18,9 @@ UNSET, UNSET_SENTINEL, ) -from mistralai_gcp.utils import get_discriminator +from mistralai_gcp.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -61,11 +63,13 @@ "ChatCompletionRequestToolChoiceTypedDict", Union[ToolChoiceTypedDict, ToolChoiceEnum], ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" ChatCompletionRequestToolChoice = TypeAliasType( "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" class ChatCompletionRequestTypedDict(TypedDict): @@ -86,16 +90,23 @@ class ChatCompletionRequestTypedDict(TypedDict): random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" class ChatCompletionRequest(BaseModel): @@ -124,23 +135,33 @@ class ChatCompletionRequest(BaseModel): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: Optional[ChatCompletionRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" @model_serializer(mode="wrap") def serialize_model(self, handler): @@ -159,15 +180,23 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", + ] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + "prompt_mode", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py index 0404a9d2..a7953eb1 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_gcp.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict +from typing import List +from typing_extensions import TypedDict class ChatCompletionResponseTypedDict(TypedDict): @@ -13,8 +13,8 @@ class ChatCompletionResponseTypedDict(TypedDict): object: str model: str usage: UsageInfoTypedDict - created: NotRequired[int] - choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + created: int + choices: List[ChatCompletionChoiceTypedDict] class ChatCompletionResponse(BaseModel): @@ -26,6 +26,6 @@ class ChatCompletionResponse(BaseModel): usage: UsageInfo - created: Optional[int] = None + created: int - choices: Optional[List[ChatCompletionChoice]] = None + choices: List[ChatCompletionChoice] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index 656f1d58..c2d25128 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -17,8 +18,9 @@ UNSET, 
UNSET_SENTINEL, ) -from mistralai_gcp.utils import get_discriminator +from mistralai_gcp.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -57,11 +59,13 @@ "ChatCompletionStreamRequestToolChoiceTypedDict", Union[ToolChoiceTypedDict, ToolChoiceEnum], ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" ChatCompletionStreamRequestToolChoice = TypeAliasType( "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" class ChatCompletionStreamRequestTypedDict(TypedDict): @@ -81,16 +85,23 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" class ChatCompletionStreamRequest(BaseModel): @@ -118,23 +129,33 @@ class ChatCompletionStreamRequest(BaseModel): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" @model_serializer(mode="wrap") def serialize_model(self, handler): @@ -153,15 +174,23 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", + ] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + "prompt_mode", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py index 8d779971..1be7dbdc 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py @@ -38,7 +38,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py index f9f0868b..1801ac76 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py @@ -46,7 +46,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py index 6dfb7373..12af226c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -27,10 +27,7 @@ class FIMCompletionRequestTypedDict(TypedDict): model: str - r"""ID of the model to use. 
Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" temperature: NotRequired[Nullable[float]] @@ -53,10 +50,7 @@ class FIMCompletionRequestTypedDict(TypedDict): class FIMCompletionRequest(BaseModel): model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" @@ -110,7 +104,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py index a4d273a2..e1940b0a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_gcp.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict +from typing import List +from typing_extensions import TypedDict class FIMCompletionResponseTypedDict(TypedDict): @@ -13,8 +13,8 @@ class FIMCompletionResponseTypedDict(TypedDict): object: str model: str usage: UsageInfoTypedDict - created: NotRequired[int] - choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + created: int + choices: List[ChatCompletionChoiceTypedDict] class FIMCompletionResponse(BaseModel): @@ -26,6 +26,6 @@ class FIMCompletionResponse(BaseModel): usage: UsageInfo - created: Optional[int] = None + created: int - choices: Optional[List[ChatCompletionChoice]] = None + choices: List[ChatCompletionChoice] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py index 406749bb..ba7a66d2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -27,10 +27,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" temperature: NotRequired[Nullable[float]] @@ -52,10 +49,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): class FIMCompletionStreamRequest(BaseModel): model: str - r"""ID of the model to use. 
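Editor's note: with the model docstring relaxed to "ID of the model with FIM to use", a hedged fill-in-the-middle call might look as follows, reusing the client sketched earlier; the model id and result access are illustrative, only `model` and `prompt` are confirmed by the request model above:

res = client.fim.complete(
    model="codestral-latest",      # any FIM-capable model id
    prompt="def fibonacci(n):\n",  # the text/code to complete
)
# FIMCompletionResponse.created and .choices are now required fields
print(res.created, res.choices[0].message.content)  # assumes ChatCompletionChoice exposes .message.content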
Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" @@ -108,7 +102,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py index 11024f85..e1f11adc 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py @@ -2,7 +2,8 @@ from __future__ import annotations from .validationerror import ValidationError -from mistralai_gcp import utils +import httpx +from mistralai_gcp.models import MistralGcpError from mistralai_gcp.types import BaseModel from typing import List, Optional @@ -11,11 +12,15 @@ class HTTPValidationErrorData(BaseModel): detail: Optional[List[ValidationError]] = None -class HTTPValidationError(Exception): +class HTTPValidationError(MistralGcpError): data: HTTPValidationErrorData - def __init__(self, data: HTTPValidationErrorData): + def __init__( + self, + data: HTTPValidationErrorData, + raw_response: httpx.Response, + body: Optional[str] = None, + ): + message = body or raw_response.text + super().__init__(message, raw_response, body) self.data = data - - def __str__(self) -> str: - return utils.marshal_json(self.data, HTTPValidationErrorData) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py index e7aa11f0..20d4ba77 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py @@ -32,7 +32,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py index 2529ce31..26914b2f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py @@ -40,7 +40,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py new file mode 100644 index 00000000..a3c60cec --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from typing import Optional + + +class MistralGcpError(Exception): + """The base class for all HTTP error responses.""" + + message: str + status_code: int + body: str + headers: httpx.Headers + raw_response: httpx.Response + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + self.message = message + self.status_code = raw_response.status_code + self.body = body if body is not None else raw_response.text + self.headers = raw_response.headers + self.raw_response = raw_response + + def __str__(self): + return self.message diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py new file mode 100644 index 00000000..3f4de0fa --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import UnrecognizedStr +from typing import Literal, Union + + +MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/no_response_error.py b/packages/mistralai_gcp/src/mistralai_gcp/models/no_response_error.py new file mode 100644 index 00000000..f98beea2 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/no_response_error.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +class NoResponseError(Exception): + """Error raised when no HTTP response is received from the server.""" + + message: str + + def __init__(self, message: str = "No response received"): + self.message = message + super().__init__(message) + + def __str__(self): + return self.message diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py b/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py index 742aac0b..36c87ab0 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py @@ -10,11 +10,15 @@ class PredictionTypedDict(TypedDict): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + type: Literal["content"] content: NotRequired[str] class Prediction(BaseModel): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + TYPE: Annotated[ Annotated[ Optional[Literal["content"]], AfterValidator(validate_const("content")) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py index 5a24f644..9fe5116c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py @@ -16,14 +16,16 @@ class ResponseFormatTypedDict(TypedDict): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
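Editor's note: with `MistralGcpError` introduced as the common base class, callers can handle every HTTP failure through one hierarchy. A hedged sketch (the endpoint call is illustrative; the attributes come from the classes above, assuming the usual re-exports from `mistralai_gcp.models`):

from mistralai_gcp import models

try:
    client.chat.complete(
        model="mistral-large-2411",
        messages=[{"role": "user", "content": "hello"}],
    )
except models.HTTPValidationError as e:
    # 422s keep their structured payload on .data
    print(e.status_code, e.data.detail)
except models.MistralGcpError as e:
    # catch-all: message, status_code, body, headers, raw_response are always set
    print(e.status_code, e.body[:200])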
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + type: NotRequired[ResponseFormats] - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + type: Optional[ResponseFormats] = None - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" json_schema: OptionalNullable[JSONSchema] = UNSET @@ -37,7 +39,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py index 08c39951..258fe70e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py @@ -5,4 +5,3 @@ ResponseFormats = Literal["text", "json_object", "json_schema"] -r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py new file mode 100644 index 00000000..8d9b9f60 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
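Editor's note: the reworked `ResponseFormat` docstring above describes three output modes. A hedged sketch of JSON mode (`json_object`), which is fully covered by that docstring; the `json_schema` variant additionally takes the nullable `json_schema` field defined in jsonschema.py, whose shape is not shown in this hunk:

res = client.chat.complete(
    model="mistral-large-2411",
    messages=[
        {"role": "system", "content": "Reply with a JSON object."},  # JSON mode requires instructing the model yourself
        {"role": "user", "content": "Name three colors."},
    ],
    response_format={"type": "json_object"},
)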
DO NOT EDIT.""" + +import httpx +from typing import Optional + +from mistralai_gcp.models import MistralGcpError + + +class ResponseValidationError(MistralGcpError): + """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" + + def __init__( + self, + message: str, + raw_response: httpx.Response, + cause: Exception, + body: Optional[str] = None, + ): + message = f"{message}: {cause}" + super().__init__(message, raw_response, body) + + @property + def cause(self): + """Normally the Pydantic ValidationError""" + return self.__cause__ diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py index 03216cbf..e85b4f49 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py @@ -1,22 +1,38 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from dataclasses import dataclass -from typing import Optional import httpx +from typing import Optional + +from mistralai_gcp.models import MistralGcpError + +MAX_MESSAGE_LEN = 10_000 + + +class SDKError(MistralGcpError): + """The fallback error class if no more specific error class is matched.""" + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + body_display = body or raw_response.text or '""' + if message: + message += ": " + message += f"Status {raw_response.status_code}" -@dataclass -class SDKError(Exception): - """Represents an error returned by the API.""" + headers = raw_response.headers + content_type = headers.get("content-type", '""') + if content_type != "application/json": + if " " in content_type: + content_type = f'"{content_type}"' + message += f" Content-Type {content_type}" - message: str - status_code: int = -1 - body: str = "" - raw_response: Optional[httpx.Response] = None + if len(body_display) > MAX_MESSAGE_LEN: + truncated = body_display[:MAX_MESSAGE_LEN] + remaining = len(body_display) - MAX_MESSAGE_LEN + body_display = f"{truncated}...and {remaining} more chars" - def __str__(self): - body = "" - if len(self.body) > 0: - body = f"\n{self.body}" + message += f". Body: {body_display}" + message = message.strip() - return f"{self.message}: Status {self.status_code}{body}" + super().__init__(message, raw_response, body) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py index f14acf12..e0fa6993 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py @@ -1,19 +1,23 @@ """Code generated by Speakeasy (https://speakeasy.com). 
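Editor's note: `SDKError` now builds a single human-readable message from the status code, a non-JSON content type, and a body truncated at `MAX_MESSAGE_LEN`. A small illustration of that construction (normally the SDK raises this internally):

import httpx
from mistralai_gcp.models import SDKError

resp = httpx.Response(503, text="upstream unavailable")
err = SDKError("API error occurred", resp)
print(err)
# -> API error occurred: Status 503 Content-Type "text/plain; charset=utf-8". Body: upstream unavailable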
DO NOT EDIT.""" from __future__ import annotations -from .textchunk import TextChunk, TextChunkTypedDict +from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, +) from mistralai_gcp.types import BaseModel from typing import List, Literal, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict SystemMessageContentTypedDict = TypeAliasType( - "SystemMessageContentTypedDict", Union[str, List[TextChunkTypedDict]] + "SystemMessageContentTypedDict", + Union[str, List[SystemMessageContentChunksTypedDict]], ) SystemMessageContent = TypeAliasType( - "SystemMessageContent", Union[str, List[TextChunk]] + "SystemMessageContent", Union[str, List[SystemMessageContentChunks]] ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessagecontentchunks.py b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessagecontentchunks.py new file mode 100644 index 00000000..e0b5bbc3 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessagecontentchunks.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from mistralai_gcp.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType + + +SystemMessageContentChunksTypedDict = TypeAliasType( + "SystemMessageContentChunksTypedDict", + Union[TextChunkTypedDict, ThinkChunkTypedDict], +) + + +SystemMessageContentChunks = Annotated[ + Union[Annotated[TextChunk, Tag("text")], Annotated[ThinkChunk, Tag("thinking")]], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py new file mode 100644 index 00000000..9c3010e2 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai_gcp.types import BaseModel +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ThinkingTypedDict = TypeAliasType( + "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] +) + + +Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) + + +ThinkChunkType = Literal["thinking"] + + +class ThinkChunkTypedDict(TypedDict): + thinking: List[ThinkingTypedDict] + closed: NotRequired[bool] + r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" + type: NotRequired[ThinkChunkType] + + +class ThinkChunk(BaseModel): + thinking: List[Thinking] + + closed: Optional[bool] = None + r"""Whether the thinking chunk is closed or not. 
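Editor's note: `ThinkChunk` and the `SystemMessageContentChunks` discriminated union let a system message interleave plain text with reasoning traces, routed on the `type` tag ("text" vs "thinking"). A hedged sketch, assuming `TextChunk` takes a `text` field (its definition is not part of this diff):

from mistralai_gcp.models import TextChunk, ThinkChunk

system_content = [
    TextChunk(text="You are a meticulous reviewer."),
    ThinkChunk(
        thinking=[TextChunk(text="Outline the diff before answering.")],  # Thinking = ReferenceChunk | TextChunk
        closed=True,  # only used for prefixing, per the docstring above
    ),
]  # usable as SystemMessage content now that it accepts SystemMessageContentChunks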
Currently only used for prefixing.""" + + type: Optional[ThinkChunkType] = "thinking" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py index 886b6ff1..bd187b32 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py @@ -51,7 +51,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py index 9de6af7e..59f36158 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py @@ -1,19 +1,82 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel -from typing_extensions import TypedDict +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict class UsageInfoTypedDict(TypedDict): - prompt_tokens: int - completion_tokens: int - total_tokens: int + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + prompt_audio_seconds: NotRequired[Nullable[int]] class UsageInfo(BaseModel): - prompt_tokens: int + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - completion_tokens: int + prompt_tokens: Optional[int] = 0 - total_tokens: int + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + prompt_audio_seconds: OptionalNullable[int] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + nullable_fields = ["prompt_audio_seconds"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py index 287bb1b4..1f9a1630 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py @@ -39,7 +39,7 @@ def serialize_model(self, handler): m = {} - 
for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index c373d27d..cf85c47e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -1,6 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from ._hooks import SDKHooks from ._version import ( __gen_version__, __openapi_doc_version__, @@ -42,9 +41,6 @@ class SDKConfiguration: retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None - def __post_init__(self): - self._hooks = SDKHooks() - def get_server_details(self) -> Tuple[str, Dict[str, str]]: if self.server_url is not None and self.server_url: return remove_suffix(self.server_url, "/"), {} @@ -55,6 +51,3 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: raise ValueError(f'Invalid server "{self.server}"') return SERVERS[self.server], {} - - def get_hooks(self) -> SDKHooks: - return self._hooks diff --git a/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py b/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py index a6187efa..231c2e37 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py @@ -2,7 +2,7 @@ from pydantic import ConfigDict, model_serializer from pydantic import BaseModel as PydanticBaseModel -from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType +from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union from typing_extensions import TypeAliasType, TypeAlias @@ -35,5 +35,5 @@ def __bool__(self) -> Literal[False]: "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) ) -UnrecognizedInt = NewType("UnrecognizedInt", int) -UnrecognizedStr = NewType("UnrecognizedStr", str) +UnrecognizedInt: TypeAlias = int +UnrecognizedStr: TypeAlias = str diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py index 3cded8fe..56164cf3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py @@ -1,50 +1,57 @@ """Code generated by Speakeasy (https://speakeasy.com). 
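Editor's note: the `UsageInfo` rework above makes every counter optional, adds the nullable `prompt_audio_seconds`, and keeps unknown usage keys via `extra="allow"`. A hedged sketch; the `cache_read_tokens` key is made up purely to show the extra-field handling:

from mistralai_gcp.models import UsageInfo

usage = UsageInfo.model_validate(
    {"prompt_tokens": 12, "completion_tokens": 40, "total_tokens": 52, "cache_read_tokens": 3}
)
print(usage.total_tokens)           # 52
print(usage.additional_properties)  # {'cache_read_tokens': 3} -- unknown keys survive serialization round-trips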
DO NOT EDIT.""" -from .annotations import get_discriminator -from .enums import OpenEnumMeta -from .headers import get_headers, get_response_headers -from .metadata import ( - FieldMetadata, - find_metadata, - FormMetadata, - HeaderMetadata, - MultipartFormMetadata, - PathParamMetadata, - QueryParamMetadata, - RequestMetadata, - SecurityMetadata, -) -from .queryparams import get_query_params -from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig -from .requestbodies import serialize_request_body, SerializedRequestBody -from .security import get_security -from .serializers import ( - get_pydantic_model, - marshal_json, - unmarshal, - unmarshal_json, - serialize_decimal, - serialize_float, - serialize_int, - stream_to_text, - stream_to_text_async, - stream_to_bytes, - stream_to_bytes_async, - validate_const, - validate_decimal, - validate_float, - validate_int, - validate_open_enum, -) -from .url import generate_url, template_url, remove_suffix -from .values import ( - get_global_from_env, - match_content_type, - match_status_codes, - match_response, - cast_partial, -) -from .logger import Logger, get_body_content, get_default_logger +from typing import TYPE_CHECKING +from importlib import import_module +import builtins +import sys + +if TYPE_CHECKING: + from .annotations import get_discriminator + from .datetimes import parse_datetime + from .enums import OpenEnumMeta + from .headers import get_headers, get_response_headers + from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, + ) + from .queryparams import get_query_params + from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig + from .requestbodies import serialize_request_body, SerializedRequestBody + from .security import get_security + from .serializers import ( + get_pydantic_model, + marshal_json, + unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, + validate_decimal, + validate_float, + validate_int, + validate_open_enum, + ) + from .url import generate_url, template_url, remove_suffix + from .values import ( + get_global_from_env, + match_content_type, + match_status_codes, + match_response, + cast_partial, + ) + from .logger import Logger, get_body_content, get_default_logger __all__ = [ "BackoffStrategy", @@ -55,6 +62,7 @@ "get_body_content", "get_default_logger", "get_discriminator", + "parse_datetime", "get_global_from_env", "get_headers", "get_pydantic_model", @@ -97,3 +105,93 @@ "validate_open_enum", "cast_partial", ] + +_dynamic_imports: dict[str, str] = { + "BackoffStrategy": ".retries", + "FieldMetadata": ".metadata", + "find_metadata": ".metadata", + "FormMetadata": ".metadata", + "generate_url": ".url", + "get_body_content": ".logger", + "get_default_logger": ".logger", + "get_discriminator": ".annotations", + "parse_datetime": ".datetimes", + "get_global_from_env": ".values", + "get_headers": ".headers", + "get_pydantic_model": ".serializers", + "get_query_params": ".queryparams", + "get_response_headers": ".headers", + "get_security": ".security", + "HeaderMetadata": ".metadata", + "Logger": ".logger", + "marshal_json": ".serializers", + "match_content_type": ".values", + "match_status_codes": ".values", + "match_response": ".values", + "MultipartFormMetadata": ".metadata", + 
"OpenEnumMeta": ".enums", + "PathParamMetadata": ".metadata", + "QueryParamMetadata": ".metadata", + "remove_suffix": ".url", + "Retries": ".retries", + "retry": ".retries", + "retry_async": ".retries", + "RetryConfig": ".retries", + "RequestMetadata": ".metadata", + "SecurityMetadata": ".metadata", + "serialize_decimal": ".serializers", + "serialize_float": ".serializers", + "serialize_int": ".serializers", + "serialize_request_body": ".requestbodies", + "SerializedRequestBody": ".requestbodies", + "stream_to_text": ".serializers", + "stream_to_text_async": ".serializers", + "stream_to_bytes": ".serializers", + "stream_to_bytes_async": ".serializers", + "template_url": ".url", + "unmarshal": ".serializers", + "unmarshal_json": ".serializers", + "validate_decimal": ".serializers", + "validate_const": ".serializers", + "validate_float": ".serializers", + "validate_int": ".serializers", + "validate_open_enum": ".serializers", + "cast_partial": ".values", +} + + +def dynamic_import(modname, retries=3): + for attempt in range(retries): + try: + return import_module(modname, __package__) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " + ) + + try: + module = dynamic_import(module_name) + return getattr(module, attr_name) + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = builtins.list(_dynamic_imports.keys()) + return builtins.sorted(lazy_attrs) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/datetimes.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/datetimes.py new file mode 100644 index 00000000..a6c52cd6 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/datetimes.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from datetime import datetime +import sys + + +def parse_datetime(datetime_string: str) -> datetime: + """ + Convert a RFC 3339 / ISO 8601 formatted string into a datetime object. + Python versions 3.11 and later support parsing RFC 3339 directly with + datetime.fromisoformat(), but for earlier versions, this function + encapsulates the necessary extra logic. + """ + # Python 3.11 and later can parse RFC 3339 directly + if sys.version_info >= (3, 11): + return datetime.fromisoformat(datetime_string) + + # For Python 3.10 and earlier, a common ValueError is trailing 'Z' suffix, + # so fix that upfront. + if datetime_string.endswith("Z"): + datetime_string = datetime_string[:-1] + "+00:00" + + return datetime.fromisoformat(datetime_string) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py index c650b10c..c3bc13cf 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py @@ -1,34 +1,74 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" import enum - +import sys class OpenEnumMeta(enum.EnumMeta): - def __call__( - cls, value, names=None, *, module=None, qualname=None, type=None, start=1 - ): - # The `type` kwarg also happens to be a built-in that pylint flags as - # redeclared. Safe to ignore this lint rule with this scope. - # pylint: disable=redefined-builtin + # The __call__ method `boundary` kwarg was added in 3.11 and must be present + # for pyright. Refer also: https://github.com/pylint-dev/pylint/issues/9622 + # pylint: disable=unexpected-keyword-arg + # The __call__ method `values` varg must be named for pyright. + # pylint: disable=keyword-arg-before-vararg + + if sys.version_info >= (3, 11): + def __call__( + cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + except ValueError: + return value + else: + def __call__( + cls, value, names=None, *, module=None, qualname=None, type=None, start=1 + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin - if names is not None: - return super().__call__( - value, - names=names, - module=module, - qualname=qualname, - type=type, - start=start, - ) + if names is not None: + return super().__call__( + value, + names=names, + module=module, + qualname=qualname, + type=type, + start=start, + ) - try: - return super().__call__( - value, - names=names, # pyright: ignore[reportArgumentType] - module=module, - qualname=qualname, - type=type, - start=start, - ) - except ValueError: - return value + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + module=module, + qualname=qualname, + type=type, + start=start, + ) + except ValueError: + return value diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py index 74a63f75..0969899b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py @@ -17,6 +17,9 @@ class EventStream(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. + client_ref: Optional[object] response: httpx.Response generator: Generator[T, None, None] @@ -25,9 +28,11 @@ def __init__( response: httpx.Response, decoder: Callable[[str], T], sentinel: Optional[str] = None, + client_ref: Optional[object] = None, ): self.response = response self.generator = stream_events(response, decoder, sentinel) + self.client_ref = client_ref def __iter__(self): return self @@ -43,6 +48,9 @@ def __exit__(self, exc_type, exc_val, exc_tb): class EventStreamAsync(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. 
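Editor's note: `OpenEnumMeta` makes enum lookups forgiving: an unrecognized value is returned unchanged instead of raising `ValueError`. A hedged illustration with a throwaway enum (the enum itself is not part of the SDK):

import enum
from mistralai_gcp.utils import OpenEnumMeta

class Color(str, enum.Enum, metaclass=OpenEnumMeta):
    RED = "red"
    BLUE = "blue"

print(Color("red") is Color.RED)  # True -- known values still resolve to members
print(Color("violet"))            # violet -- unknown values pass through as plain strings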
+ client_ref: Optional[object] response: httpx.Response generator: AsyncGenerator[T, None] @@ -51,9 +59,11 @@ def __init__( response: httpx.Response, decoder: Callable[[str], T], sentinel: Optional[str] = None, + client_ref: Optional[object] = None, ): self.response = response self.generator = stream_events_async(response, decoder, sentinel) + self.client_ref = client_ref def __aiter__(self): return self diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py index 0472aba8..e873495f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py @@ -86,11 +86,39 @@ def _populate_form( return form +def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]: + """Extract file name, content, and content type from a file object.""" + file_fields: Dict[str, FieldInfo] = file_obj.__class__.model_fields + + file_name = "" + content = None + content_type = None + + for file_field_name in file_fields: + file_field = file_fields[file_field_name] + + file_metadata = find_field_metadata(file_field, MultipartFormMetadata) + if file_metadata is None: + continue + + if file_metadata.content: + content = getattr(file_obj, file_field_name, None) + elif file_field_name == "content_type": + content_type = getattr(file_obj, file_field_name, None) + else: + file_name = getattr(file_obj, file_field_name) + + if file_name == "" or content is None: + raise ValueError("invalid multipart/form-data file") + + return file_name, content, content_type + + def serialize_multipart_form( media_type: str, request: Any -) -> Tuple[str, Dict[str, Any], Dict[str, Any]]: +) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]: form: Dict[str, Any] = {} - files: Dict[str, Any] = {} + files: List[Tuple[str, Any]] = [] if not isinstance(request, BaseModel): raise TypeError("invalid request body type") @@ -112,39 +140,32 @@ def serialize_multipart_form( f_name = field.alias if field.alias else name if field_metadata.file: - file_fields: Dict[str, FieldInfo] = val.__class__.model_fields - - file_name = "" - content = None - content_type = None - - for file_field_name in file_fields: - file_field = file_fields[file_field_name] + if isinstance(val, List): + # Handle array of files + for file_obj in val: + if not _is_set(file_obj): + continue + + file_name, content, content_type = _extract_file_properties(file_obj) - file_metadata = find_field_metadata(file_field, MultipartFormMetadata) - if file_metadata is None: - continue + if content_type is not None: + files.append((f_name + "[]", (file_name, content, content_type))) + else: + files.append((f_name + "[]", (file_name, content))) + else: + # Handle single file + file_name, content, content_type = _extract_file_properties(val) - if file_metadata.content: - content = getattr(val, file_field_name, None) - elif file_field_name == "content_type": - content_type = getattr(val, file_field_name, None) + if content_type is not None: + files.append((f_name, (file_name, content, content_type))) else: - file_name = getattr(val, file_field_name) - - if file_name == "" or content is None: - raise ValueError("invalid multipart/form-data file") - - if content_type is not None: - files[f_name] = (file_name, content, content_type) - else: - files[f_name] = (file_name, content) + files.append((f_name, (file_name, content))) elif field_metadata.json: - files[f_name] = ( + files.append((f_name, ( None, marshal_json(val, request_field_types[name]), 
"application/json", - ) + ))) else: if isinstance(val, List): values = [] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py index baa41fbd..378a14c0 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py @@ -1,13 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from decimal import Decimal +import functools import json -from typing import Any, Dict, List, Union, get_args -import httpx +import typing +from typing import Any, Dict, List, Tuple, Union, get_args +import typing_extensions from typing_extensions import get_origin + +import httpx from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspection.typing_objects import is_union from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset @@ -185,6 +188,15 @@ def is_nullable(field): return False +def is_union(obj: object) -> bool: + """ + Returns True if the given object is a typing.Union or typing_extensions.Union. + """ + return any( + obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union") + ) + + def stream_to_text(stream: httpx.Response) -> str: return "".join(stream.iter_text()) @@ -217,3 +229,21 @@ def _contains_pydantic_model(data: Any) -> bool: return any(_contains_pydantic_model(value) for value in data.values()) return False + + +@functools.cache +def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: + """ + Get typing objects by name from typing and typing_extensions. + Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types + """ + result = tuple( + getattr(module, name) + for module in (typing, typing_extensions) + if hasattr(module, name) + ) + if not result: + raise ValueError( + f"Neither typing nor typing_extensions has an object called {name!r}" + ) + return result diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py new file mode 100644 index 00000000..8fe5c996 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Any, Optional + +import httpx + +from .serializers import unmarshal_json +from mistralai_gcp import models + + +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: + if body is None: + body = http_res.text + try: + return unmarshal_json(body, typ) + except Exception as e: + raise models.ResponseValidationError( + "Response validation failed", + http_res, + e, + body, + ) from e