diff --git a/.gitignore b/.gitignore
index 336f773d..cf2de5ee 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+.env.local
 .idea
 **/__pycache__/
 **/.speakeasy/temp/
diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock
index 3136ceae..6d9c76eb 100644
--- a/.speakeasy/gen.lock
+++ b/.speakeasy/gen.lock
@@ -3,19 +3,23 @@ id: 2d045ec7-2ebb-4f4d-ad25-40953b132161
 management:
   docChecksum: 3135f1ce6dd57e0487ee2840362ced1a
   docVersion: 1.0.0
-  speakeasyVersion: 1.606.10
-  generationVersion: 2.687.13
-  releaseVersion: 1.10.0
-  configChecksum: 1446aab5f184e7184590fe5756b556a8
+  speakeasyVersion: 1.681.0
+  generationVersion: 2.789.5
+  releaseVersion: 1.11.0
+  configChecksum: 99d8b30f701935f8b8bf94786669ddb1
   repoURL: https://github.com/mistralai/client-python.git
   installationURL: https://github.com/mistralai/client-python.git
   published: true
+persistentEdits:
+  generation_id: ee70f527-0911-4f49-95b6-782891c44d66
+  pristine_commit_hash: 9ea45e1424b7577e3a7507396d0327e0d8e77941
+  pristine_tree_hash: 784012933707b2069b991641187d4466e901d6e1
 features:
   python:
     additionalDependencies: 1.0.0
     additionalProperties: 1.0.1
     constsAndDefaults: 1.0.5
-    core: 5.20.1
+    core: 5.23.15
     customCodeRegions: 0.1.1
     defaultEnabledRetries: 0.2.0
     downloadStreams: 1.0.1
@@ -24,758 +28,2932 @@ features:
     examples: 3.0.2
     flatRequests: 1.0.1
     flattening: 3.1.1
-    globalSecurity: 3.0.3
+    globalSecurity: 3.0.4
     globalSecurityCallbacks: 1.0.0
     globalSecurityFlattening: 1.0.0
-    globalServerURLs: 3.1.1
+    globalServerURLs: 3.2.0
     methodArguments: 1.0.2
     multipartFileContentType: 1.0.0
     nameOverrides: 3.0.1
     nullables: 1.0.1
     openEnums: 1.0.1
     responseFormat: 1.0.1
-    retries: 3.0.2
-    sdkHooks: 1.1.0
-    serverEvents: 1.0.8
+    retries: 3.0.3
+    sdkHooks: 1.2.0
+    serverEvents: 1.0.11
     serverEventsSentinels: 0.1.0
     serverIDs: 3.0.0
-    unions: 3.0.4
+    unions: 3.1.1
     uploadStreams: 1.0.0
-generatedFiles:
-  - .gitattributes
-  - .vscode/settings.json
-  - USAGE.md
-  - docs/models/agent.md
-  - docs/models/agentconversation.md
-  - docs/models/agentconversationobject.md
-  - docs/models/agentcreationrequest.md
-  - docs/models/agentcreationrequesttools.md
-  - docs/models/agenthandoffdoneevent.md
-  - docs/models/agenthandoffdoneeventtype.md
-  - docs/models/agenthandoffentry.md
-  - docs/models/agenthandoffentryobject.md
-  - docs/models/agenthandoffentrytype.md
-  - docs/models/agenthandoffstartedevent.md
-  - docs/models/agenthandoffstartedeventtype.md
-  - docs/models/agentobject.md
-  - docs/models/agentsapiv1agentsdeleterequest.md
-  - docs/models/agentsapiv1agentsgetrequest.md
-  - docs/models/agentsapiv1agentslistrequest.md
-  - docs/models/agentsapiv1agentsupdaterequest.md
-  - docs/models/agentsapiv1agentsupdateversionrequest.md
-  - docs/models/agentsapiv1conversationsappendrequest.md
-  - docs/models/agentsapiv1conversationsappendstreamrequest.md
-  - docs/models/agentsapiv1conversationsdeleterequest.md
-  - docs/models/agentsapiv1conversationsgetrequest.md
-  - docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md
-  - docs/models/agentsapiv1conversationshistoryrequest.md
-  - docs/models/agentsapiv1conversationslistrequest.md
-  - docs/models/agentsapiv1conversationsmessagesrequest.md
-  - docs/models/agentsapiv1conversationsrestartrequest.md
-  - docs/models/agentsapiv1conversationsrestartstreamrequest.md
-  - docs/models/agentscompletionrequest.md
-  - docs/models/agentscompletionrequestmessages.md
-  - docs/models/agentscompletionrequeststop.md
-  - docs/models/agentscompletionrequesttoolchoice.md
-  - docs/models/agentscompletionstreamrequest.md
-  - docs/models/agentscompletionstreamrequestmessages.md
-  - docs/models/agentscompletionstreamrequeststop.md
-  - docs/models/agentscompletionstreamrequesttoolchoice.md
-  - docs/models/agenttools.md
-  - docs/models/agentupdaterequest.md
-  - docs/models/agentupdaterequesttools.md
-  - docs/models/apiendpoint.md
-  - docs/models/archiveftmodelout.md
-  - docs/models/archiveftmodeloutobject.md
-  - docs/models/arguments.md
-  - docs/models/assistantmessage.md
-  - docs/models/assistantmessagecontent.md
-  - docs/models/assistantmessagerole.md
-  - docs/models/attributes.md
-  - docs/models/audiochunk.md
-  - docs/models/audiochunktype.md
-  - docs/models/audiotranscriptionrequest.md
-  - docs/models/audiotranscriptionrequeststream.md
-  - docs/models/basemodelcard.md
-  - docs/models/basemodelcardtype.md
-  - docs/models/batcherror.md
-  - docs/models/batchjobin.md
-  - docs/models/batchjobout.md
-  - docs/models/batchjoboutobject.md
-  - docs/models/batchjobsout.md
-  - docs/models/batchjobsoutobject.md
-  - docs/models/batchjobstatus.md
-  - docs/models/builtinconnectors.md
-  - docs/models/chatclassificationrequest.md
-  - docs/models/chatcompletionchoice.md
-  - docs/models/chatcompletionrequest.md
-  - docs/models/chatcompletionrequesttoolchoice.md
-  - docs/models/chatcompletionresponse.md
-  - docs/models/chatcompletionstreamrequest.md
-  - docs/models/chatcompletionstreamrequestmessages.md
-  - docs/models/chatcompletionstreamrequeststop.md
-  - docs/models/chatcompletionstreamrequesttoolchoice.md
-  - docs/models/chatmoderationrequest.md
-  - docs/models/chatmoderationrequestinputs.md
-  - docs/models/checkpointout.md
-  - docs/models/classificationrequest.md
-  - docs/models/classificationrequestinputs.md
-  - docs/models/classificationresponse.md
-  - docs/models/classificationtargetresult.md
-  - docs/models/classifierdetailedjobout.md
-  - docs/models/classifierdetailedjoboutintegrations.md
-  - docs/models/classifierdetailedjoboutjobtype.md
-  - docs/models/classifierdetailedjoboutobject.md
-  - docs/models/classifierdetailedjoboutstatus.md
-  - docs/models/classifierftmodelout.md
-  - docs/models/classifierftmodeloutmodeltype.md
-  - docs/models/classifierftmodeloutobject.md
-  - docs/models/classifierjobout.md
-  - docs/models/classifierjoboutintegrations.md
-  - docs/models/classifierjoboutjobtype.md
-  - docs/models/classifierjoboutobject.md
-  - docs/models/classifierjoboutstatus.md
-  - docs/models/classifiertargetin.md
-  - docs/models/classifiertargetout.md
-  - docs/models/classifiertrainingparameters.md
-  - docs/models/classifiertrainingparametersin.md
-  - docs/models/codeinterpretertool.md
-  - docs/models/codeinterpretertooltype.md
-  - docs/models/completionargs.md
-  - docs/models/completionargsstop.md
-  - docs/models/completionchunk.md
-  - docs/models/completiondetailedjobout.md
-  - docs/models/completiondetailedjoboutintegrations.md
-  - docs/models/completiondetailedjoboutjobtype.md
-  - docs/models/completiondetailedjoboutobject.md
-  - docs/models/completiondetailedjoboutrepositories.md
-  - docs/models/completiondetailedjoboutstatus.md
-  - docs/models/completionevent.md
-  - docs/models/completionftmodelout.md
-  - docs/models/completionftmodeloutobject.md
-  - docs/models/completionjobout.md
-  - docs/models/completionjoboutobject.md
-  - docs/models/completionresponsestreamchoice.md
-  - docs/models/completionresponsestreamchoicefinishreason.md
-  - docs/models/completiontrainingparameters.md
-  - docs/models/completiontrainingparametersin.md
-  - docs/models/content.md
-  - docs/models/contentchunk.md
-  - docs/models/conversationappendrequest.md
-  - docs/models/conversationappendrequesthandoffexecution.md
-  - docs/models/conversationappendstreamrequest.md
-  - docs/models/conversationappendstreamrequesthandoffexecution.md
-  - docs/models/conversationevents.md
-  - docs/models/conversationeventsdata.md
-  - docs/models/conversationhistory.md
-  - docs/models/conversationhistoryobject.md
-  - docs/models/conversationinputs.md
-  - docs/models/conversationmessages.md
-  - docs/models/conversationmessagesobject.md
-  - docs/models/conversationrequest.md
-  - docs/models/conversationresponse.md
-  - docs/models/conversationresponseobject.md
-  - docs/models/conversationrestartrequest.md
-  - docs/models/conversationrestartrequesthandoffexecution.md
-  - docs/models/conversationrestartstreamrequest.md
-  - docs/models/conversationrestartstreamrequesthandoffexecution.md
-  - docs/models/conversationstreamrequest.md
-  - docs/models/conversationstreamrequesthandoffexecution.md
-  - docs/models/conversationstreamrequesttools.md
-  - docs/models/conversationusageinfo.md
-  - docs/models/data.md
-  - docs/models/deletefileout.md
-  - docs/models/deletemodelout.md
-  - docs/models/deletemodelv1modelsmodeliddeleterequest.md
-  - docs/models/deltamessage.md
-  - docs/models/document.md
-  - docs/models/documentlibrarytool.md
-  - docs/models/documentlibrarytooltype.md
-  - docs/models/documentout.md
-  - docs/models/documenttextcontent.md
-  - docs/models/documentupdatein.md
-  - docs/models/documenturlchunk.md
-  - docs/models/documenturlchunktype.md
-  - docs/models/embeddingdtype.md
-  - docs/models/embeddingrequest.md
-  - docs/models/embeddingrequestinputs.md
-  - docs/models/embeddingresponse.md
-  - docs/models/embeddingresponsedata.md
-  - docs/models/encodingformat.md
-  - docs/models/entitytype.md
-  - docs/models/entries.md
-  - docs/models/eventout.md
-  - docs/models/file.md
-  - docs/models/filechunk.md
-  - docs/models/filepurpose.md
-  - docs/models/filesapiroutesdeletefilerequest.md
-  - docs/models/filesapiroutesdownloadfilerequest.md
-  - docs/models/filesapiroutesgetsignedurlrequest.md
-  - docs/models/filesapirouteslistfilesrequest.md
-  - docs/models/filesapiroutesretrievefilerequest.md
-  - docs/models/filesapiroutesuploadfilemultipartbodyparams.md
-  - docs/models/fileschema.md
-  - docs/models/filesignedurl.md
-  - docs/models/fimcompletionrequest.md
-  - docs/models/fimcompletionrequeststop.md
-  - docs/models/fimcompletionresponse.md
-  - docs/models/fimcompletionstreamrequest.md
-  - docs/models/fimcompletionstreamrequeststop.md
-  - docs/models/finetuneablemodeltype.md
-  - docs/models/finishreason.md
-  - docs/models/format_.md
-  - docs/models/ftclassifierlossfunction.md
-  - docs/models/ftmodelcapabilitiesout.md
-  - docs/models/ftmodelcard.md
-  - docs/models/ftmodelcardtype.md
-  - docs/models/function.md
-  - docs/models/functioncall.md
-  - docs/models/functioncallentry.md
-  - docs/models/functioncallentryarguments.md
-  - docs/models/functioncallentryobject.md
-  - docs/models/functioncallentrytype.md
-  - docs/models/functioncallevent.md
-  - docs/models/functioncalleventtype.md
-  - docs/models/functionname.md
-  - docs/models/functionresultentry.md
-  - docs/models/functionresultentryobject.md
-  - docs/models/functionresultentrytype.md
-  - docs/models/functiontool.md
-  - docs/models/functiontooltype.md
-  - docs/models/githubrepositoryin.md
-  - docs/models/githubrepositoryintype.md
-  - docs/models/githubrepositoryout.md
-  - docs/models/githubrepositoryouttype.md
-  - docs/models/handoffexecution.md
-  - docs/models/httpvalidationerror.md
-  - docs/models/hyperparameters.md
-  - docs/models/imagegenerationtool.md
-  - docs/models/imagegenerationtooltype.md
-  - docs/models/imageurl.md
-  - docs/models/imageurlchunk.md
-  - docs/models/imageurlchunkimageurl.md
-  - docs/models/imageurlchunktype.md
-  - docs/models/inputentries.md
-  - docs/models/inputs.md
-  - docs/models/instructrequest.md
-  - docs/models/instructrequestinputs.md
-  - docs/models/instructrequestinputsmessages.md
-  - docs/models/instructrequestmessages.md
-  - docs/models/integrations.md
-  - docs/models/jobin.md
-  - docs/models/jobinintegrations.md
-  - docs/models/jobinrepositories.md
-  - docs/models/jobmetadataout.md
-  - docs/models/jobsapiroutesbatchcancelbatchjobrequest.md
-  - docs/models/jobsapiroutesbatchgetbatchjobrequest.md
-  - docs/models/jobsapiroutesbatchgetbatchjobsrequest.md
-  - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md
-  - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md
-  - docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md
-  - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md
-  - docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md
-  - docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md
-  - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md
-  - docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md
-  - docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md
-  - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md
-  - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md
-  - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md
-  - docs/models/jobsout.md
-  - docs/models/jobsoutdata.md
-  - docs/models/jobsoutobject.md
-  - docs/models/jobtype.md
-  - docs/models/jsonschema.md
-  - docs/models/legacyjobmetadataout.md
-  - docs/models/legacyjobmetadataoutobject.md
-  - docs/models/librariesdeletev1request.md
-  - docs/models/librariesdocumentsdeletev1request.md
-  - docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md
-  - docs/models/librariesdocumentsgetsignedurlv1request.md
-  - docs/models/librariesdocumentsgetstatusv1request.md
-  - docs/models/librariesdocumentsgettextcontentv1request.md
-  - docs/models/librariesdocumentsgetv1request.md
-  - docs/models/librariesdocumentslistv1request.md
-  - docs/models/librariesdocumentsreprocessv1request.md
-  - docs/models/librariesdocumentsupdatev1request.md
-  - docs/models/librariesdocumentsuploadv1documentupload.md
-  - docs/models/librariesdocumentsuploadv1request.md
-  - docs/models/librariesgetv1request.md
-  - docs/models/librariessharecreatev1request.md
-  - docs/models/librariessharedeletev1request.md
-  - docs/models/librariessharelistv1request.md
-  - docs/models/librariesupdatev1request.md
-  - docs/models/libraryin.md
-  - docs/models/libraryinupdate.md
-  - docs/models/libraryout.md
-  - docs/models/listdocumentout.md
-  - docs/models/listfilesout.md
-  - docs/models/listlibraryout.md
-  - docs/models/listsharingout.md
-  - docs/models/loc.md
-  - docs/models/messageentries.md
-  - docs/models/messageinputcontentchunks.md
-  - docs/models/messageinputentry.md
-  - docs/models/messageinputentrycontent.md
-  - docs/models/messageinputentryrole.md
-  - docs/models/messageinputentrytype.md
-  - docs/models/messageoutputcontentchunks.md
-  - docs/models/messageoutputentry.md
-  - docs/models/messageoutputentrycontent.md
-  - docs/models/messageoutputentryobject.md
-  - docs/models/messageoutputentryrole.md
-  - docs/models/messageoutputentrytype.md
-  - docs/models/messageoutputevent.md
-  - docs/models/messageoutputeventcontent.md
-  - docs/models/messageoutputeventrole.md
-  - docs/models/messageoutputeventtype.md
-  - docs/models/messages.md
-  - docs/models/metricout.md
-  - docs/models/mistralpromptmode.md
-  - docs/models/modelcapabilities.md
-  - docs/models/modelconversation.md
-  - docs/models/modelconversationobject.md
-  - docs/models/modelconversationtools.md
-  - docs/models/modellist.md
-  - docs/models/modeltype.md
-  - docs/models/moderationobject.md
-  - docs/models/moderationresponse.md
-  - docs/models/name.md
-  - docs/models/object.md
-  - docs/models/ocrimageobject.md
-  - docs/models/ocrpagedimensions.md
-  - docs/models/ocrpageobject.md
-  - docs/models/ocrrequest.md
-  - docs/models/ocrresponse.md
-  - docs/models/ocrtableobject.md
-  - docs/models/ocrusageinfo.md
-  - docs/models/one.md
-  - docs/models/outputcontentchunks.md
-  - docs/models/outputs.md
-  - docs/models/paginationinfo.md
-  - docs/models/prediction.md
-  - docs/models/processingstatusout.md
-  - docs/models/queryparamstatus.md
-  - docs/models/referencechunk.md
-  - docs/models/referencechunktype.md
-  - docs/models/repositories.md
-  - docs/models/requestsource.md
-  - docs/models/response1.md
-  - docs/models/responsebody.md
-  - docs/models/responsedoneevent.md
-  - docs/models/responsedoneeventtype.md
-  - docs/models/responseerrorevent.md
-  - docs/models/responseerroreventtype.md
-  - docs/models/responseformat.md
-  - docs/models/responseformats.md
-  - docs/models/responsestartedevent.md
-  - docs/models/responsestartedeventtype.md
-  - docs/models/retrievefileout.md
-  - docs/models/retrievemodelv1modelsmodelidgetrequest.md
-  - docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md
-  - docs/models/role.md
-  - docs/models/sampletype.md
-  - docs/models/security.md
-  - docs/models/shareenum.md
-  - docs/models/sharingdelete.md
-  - docs/models/sharingin.md
-  - docs/models/sharingout.md
-  - docs/models/source.md
-  - docs/models/ssetypes.md
-  - docs/models/status.md
-  - docs/models/stop.md
-  - docs/models/systemmessage.md
-  - docs/models/systemmessagecontent.md
-  - docs/models/systemmessagecontentchunks.md
-  - docs/models/tableformat.md
-  - docs/models/textchunk.md
-  - docs/models/textchunktype.md
-  - docs/models/thinkchunk.md
-  - docs/models/thinkchunktype.md
-  - docs/models/thinking.md
-  - docs/models/timestampgranularity.md
-  - docs/models/tool.md
-  - docs/models/toolcall.md
-  - docs/models/toolchoice.md
-  - docs/models/toolchoiceenum.md
-  - docs/models/toolexecutiondeltaevent.md
-  - docs/models/toolexecutiondeltaeventname.md
-  - docs/models/toolexecutiondeltaeventtype.md
-  - docs/models/toolexecutiondoneevent.md
-  - docs/models/toolexecutiondoneeventname.md
-  - docs/models/toolexecutiondoneeventtype.md
-  - docs/models/toolexecutionentry.md
-  - docs/models/toolexecutionentryobject.md
-  - docs/models/toolexecutionentrytype.md
-  - docs/models/toolexecutionstartedevent.md
-  - docs/models/toolexecutionstartedeventname.md
-  - docs/models/toolexecutionstartedeventtype.md
-  - docs/models/toolfilechunk.md
-  - docs/models/toolfilechunktype.md
-  - docs/models/toolmessage.md
-  - docs/models/toolmessagecontent.md
-  - docs/models/toolmessagerole.md
-  - docs/models/toolreferencechunk.md
-  - docs/models/toolreferencechunktype.md
-  - docs/models/tools.md
-  - docs/models/tooltypes.md
-  - docs/models/trainingfile.md
-  - docs/models/transcriptionresponse.md
-  - docs/models/transcriptionsegmentchunk.md
-  - docs/models/transcriptionstreamdone.md
-  - docs/models/transcriptionstreamdonetype.md
-  - docs/models/transcriptionstreamevents.md
-  - docs/models/transcriptionstreameventsdata.md
-  - docs/models/transcriptionstreameventtypes.md
-  - docs/models/transcriptionstreamlanguage.md
-  - docs/models/transcriptionstreamlanguagetype.md
-  - docs/models/transcriptionstreamsegmentdelta.md
-  - docs/models/transcriptionstreamsegmentdeltatype.md
-  - docs/models/transcriptionstreamtextdelta.md
-  - docs/models/transcriptionstreamtextdeltatype.md
-  - docs/models/two.md
-  - docs/models/type.md
-  - docs/models/unarchiveftmodelout.md
-  - docs/models/unarchiveftmodeloutobject.md
-  - docs/models/updateftmodelin.md
-  - docs/models/uploadfileout.md
-  - docs/models/usageinfo.md
-  - docs/models/usermessage.md
-  - docs/models/usermessagecontent.md
-  - docs/models/usermessagerole.md
-  - docs/models/utils/retryconfig.md
-  - docs/models/validationerror.md
-  - docs/models/wandbintegration.md
-  - docs/models/wandbintegrationout.md
-  - docs/models/wandbintegrationouttype.md
-  - docs/models/wandbintegrationtype.md
-  - docs/models/websearchpremiumtool.md
-  - docs/models/websearchpremiumtooltype.md
-  - docs/models/websearchtool.md
-  - docs/models/websearchtooltype.md
-  - docs/sdks/accesses/README.md
-  - docs/sdks/agents/README.md
-  - docs/sdks/audio/README.md
-  - docs/sdks/batch/README.md
-  - docs/sdks/beta/README.md
-  - docs/sdks/chat/README.md
-  - docs/sdks/classifiers/README.md
-  - docs/sdks/conversations/README.md
-  - docs/sdks/documents/README.md
-  - docs/sdks/embeddings/README.md
-  - docs/sdks/files/README.md
-  - docs/sdks/fim/README.md
-  - docs/sdks/finetuning/README.md
-  - docs/sdks/jobs/README.md
-  - docs/sdks/libraries/README.md
-  - docs/sdks/mistral/README.md
-  - docs/sdks/mistralagents/README.md
-  - docs/sdks/mistraljobs/README.md
-  - docs/sdks/models/README.md
-  - docs/sdks/ocr/README.md
-  - docs/sdks/transcriptions/README.md
-  - poetry.toml
-  - py.typed
-  - scripts/prepare_readme.py
-  - scripts/publish.sh
-  - src/mistralai/__init__.py
-  - src/mistralai/_hooks/__init__.py
-  - src/mistralai/_hooks/sdkhooks.py
-  - src/mistralai/_hooks/types.py
-  - src/mistralai/_version.py
-  - src/mistralai/accesses.py
-  - src/mistralai/agents.py
-  - src/mistralai/audio.py
-  - src/mistralai/basesdk.py
-  - src/mistralai/batch.py
-  - src/mistralai/beta.py
-  - src/mistralai/chat.py
-  - src/mistralai/classifiers.py
-  - src/mistralai/conversations.py
-  - src/mistralai/documents.py
-  - src/mistralai/embeddings.py
-  - src/mistralai/files.py
-  - src/mistralai/fim.py
-  - src/mistralai/fine_tuning.py
-  - src/mistralai/httpclient.py
-  - src/mistralai/jobs.py
-  - src/mistralai/libraries.py
-  - src/mistralai/mistral_agents.py
-  - src/mistralai/mistral_jobs.py
-  - src/mistralai/models/__init__.py
-  - src/mistralai/models/agent.py
-  - src/mistralai/models/agentconversation.py
-  - src/mistralai/models/agentcreationrequest.py
-  - src/mistralai/models/agenthandoffdoneevent.py
-  - src/mistralai/models/agenthandoffentry.py
-  - src/mistralai/models/agenthandoffstartedevent.py
-  - src/mistralai/models/agents_api_v1_agents_deleteop.py
-  - src/mistralai/models/agents_api_v1_agents_getop.py
-  - src/mistralai/models/agents_api_v1_agents_listop.py
-  - src/mistralai/models/agents_api_v1_agents_update_versionop.py
-  - src/mistralai/models/agents_api_v1_agents_updateop.py
-  - src/mistralai/models/agents_api_v1_conversations_append_streamop.py
-  - src/mistralai/models/agents_api_v1_conversations_appendop.py
-  - src/mistralai/models/agents_api_v1_conversations_deleteop.py
-  - src/mistralai/models/agents_api_v1_conversations_getop.py
-  - src/mistralai/models/agents_api_v1_conversations_historyop.py
-  - src/mistralai/models/agents_api_v1_conversations_listop.py
-  - src/mistralai/models/agents_api_v1_conversations_messagesop.py
-  - src/mistralai/models/agents_api_v1_conversations_restart_streamop.py
-  - src/mistralai/models/agents_api_v1_conversations_restartop.py
-  - src/mistralai/models/agentscompletionrequest.py
-  - src/mistralai/models/agentscompletionstreamrequest.py
-  - src/mistralai/models/agentupdaterequest.py
-  - src/mistralai/models/apiendpoint.py
-  - src/mistralai/models/archiveftmodelout.py
-  - src/mistralai/models/assistantmessage.py
-  - src/mistralai/models/audiochunk.py
-  - src/mistralai/models/audiotranscriptionrequest.py
-  - src/mistralai/models/audiotranscriptionrequeststream.py
-  - src/mistralai/models/basemodelcard.py
-  - src/mistralai/models/batcherror.py
-  - src/mistralai/models/batchjobin.py
-  - src/mistralai/models/batchjobout.py
-  - src/mistralai/models/batchjobsout.py
-  - src/mistralai/models/batchjobstatus.py
-  - src/mistralai/models/builtinconnectors.py
-  - src/mistralai/models/chatclassificationrequest.py
-  - src/mistralai/models/chatcompletionchoice.py
-  - src/mistralai/models/chatcompletionrequest.py
-  - src/mistralai/models/chatcompletionresponse.py
-  - src/mistralai/models/chatcompletionstreamrequest.py
-  - src/mistralai/models/chatmoderationrequest.py
-  - src/mistralai/models/checkpointout.py
-  - src/mistralai/models/classificationrequest.py
-  - src/mistralai/models/classificationresponse.py
-  - src/mistralai/models/classificationtargetresult.py
-  - src/mistralai/models/classifierdetailedjobout.py
-  - src/mistralai/models/classifierftmodelout.py
-  - src/mistralai/models/classifierjobout.py
-  - src/mistralai/models/classifiertargetin.py
-  - src/mistralai/models/classifiertargetout.py
-  - src/mistralai/models/classifiertrainingparameters.py
-  - src/mistralai/models/classifiertrainingparametersin.py
-  - src/mistralai/models/codeinterpretertool.py
-  - src/mistralai/models/completionargs.py
-  - src/mistralai/models/completionargsstop.py
-  - src/mistralai/models/completionchunk.py
-  - src/mistralai/models/completiondetailedjobout.py
-  - src/mistralai/models/completionevent.py
-  - src/mistralai/models/completionftmodelout.py
-  - src/mistralai/models/completionjobout.py
-  - src/mistralai/models/completionresponsestreamchoice.py
-  - src/mistralai/models/completiontrainingparameters.py
-  - src/mistralai/models/completiontrainingparametersin.py
-  - src/mistralai/models/contentchunk.py
-  - src/mistralai/models/conversationappendrequest.py
-  - src/mistralai/models/conversationappendstreamrequest.py
-  - src/mistralai/models/conversationevents.py
-  - src/mistralai/models/conversationhistory.py
-  - src/mistralai/models/conversationinputs.py
-  - src/mistralai/models/conversationmessages.py
-  - src/mistralai/models/conversationrequest.py
-  - src/mistralai/models/conversationresponse.py
-  - src/mistralai/models/conversationrestartrequest.py
-  - src/mistralai/models/conversationrestartstreamrequest.py
-  - src/mistralai/models/conversationstreamrequest.py
-  - src/mistralai/models/conversationusageinfo.py
-  - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py
-  - src/mistralai/models/deletefileout.py
-  - src/mistralai/models/deletemodelout.py
-  - src/mistralai/models/deltamessage.py
-  - src/mistralai/models/documentlibrarytool.py
-  - src/mistralai/models/documentout.py
-  - src/mistralai/models/documenttextcontent.py
-  - src/mistralai/models/documentupdatein.py
-  - src/mistralai/models/documenturlchunk.py
-  - src/mistralai/models/embeddingdtype.py
-  - src/mistralai/models/embeddingrequest.py
-  - src/mistralai/models/embeddingresponse.py
-  - src/mistralai/models/embeddingresponsedata.py
-  - src/mistralai/models/encodingformat.py
-  - src/mistralai/models/entitytype.py
-  - src/mistralai/models/eventout.py
-  - src/mistralai/models/file.py
-  - src/mistralai/models/filechunk.py
-  - src/mistralai/models/filepurpose.py
-  - src/mistralai/models/files_api_routes_delete_fileop.py
-  - src/mistralai/models/files_api_routes_download_fileop.py
-  - src/mistralai/models/files_api_routes_get_signed_urlop.py
-  - src/mistralai/models/files_api_routes_list_filesop.py
-  - src/mistralai/models/files_api_routes_retrieve_fileop.py
-  - src/mistralai/models/files_api_routes_upload_fileop.py
-  - src/mistralai/models/fileschema.py
-  - src/mistralai/models/filesignedurl.py
-  - src/mistralai/models/fimcompletionrequest.py
-  - src/mistralai/models/fimcompletionresponse.py
-  - src/mistralai/models/fimcompletionstreamrequest.py
-  - src/mistralai/models/finetuneablemodeltype.py
-  - src/mistralai/models/ftclassifierlossfunction.py
-  - src/mistralai/models/ftmodelcapabilitiesout.py
-  - src/mistralai/models/ftmodelcard.py
-  - src/mistralai/models/function.py
-  - src/mistralai/models/functioncall.py
-  - src/mistralai/models/functioncallentry.py
-  - src/mistralai/models/functioncallentryarguments.py
-  - src/mistralai/models/functioncallevent.py
-  - src/mistralai/models/functionname.py
-  - src/mistralai/models/functionresultentry.py
-  - src/mistralai/models/functiontool.py
-  - src/mistralai/models/githubrepositoryin.py
-  - src/mistralai/models/githubrepositoryout.py
-  - src/mistralai/models/httpvalidationerror.py
-  - src/mistralai/models/imagegenerationtool.py
-  - src/mistralai/models/imageurl.py
-  - src/mistralai/models/imageurlchunk.py
-  - src/mistralai/models/inputentries.py
-  - src/mistralai/models/inputs.py
-  - src/mistralai/models/instructrequest.py
-  - src/mistralai/models/jobin.py
-  - src/mistralai/models/jobmetadataout.py
-  - src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py
-  - src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py
-  - src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py
-  - src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py
-  - src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py
-  - src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py
-  - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py
-  - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py
-  - src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py
-  - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py
-  - src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py
-  - src/mistralai/models/jobsout.py
-  - src/mistralai/models/jsonschema.py
-  - src/mistralai/models/legacyjobmetadataout.py
-  - src/mistralai/models/libraries_delete_v1op.py
-  - src/mistralai/models/libraries_documents_delete_v1op.py
-  - src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py
-  - src/mistralai/models/libraries_documents_get_signed_url_v1op.py
-  - src/mistralai/models/libraries_documents_get_status_v1op.py
-  - src/mistralai/models/libraries_documents_get_text_content_v1op.py
-  - src/mistralai/models/libraries_documents_get_v1op.py
-  - src/mistralai/models/libraries_documents_list_v1op.py
-  - src/mistralai/models/libraries_documents_reprocess_v1op.py
-  - src/mistralai/models/libraries_documents_update_v1op.py
-  - src/mistralai/models/libraries_documents_upload_v1op.py
-  - src/mistralai/models/libraries_get_v1op.py
-  - src/mistralai/models/libraries_share_create_v1op.py
-  - src/mistralai/models/libraries_share_delete_v1op.py
-  - src/mistralai/models/libraries_share_list_v1op.py
-  - src/mistralai/models/libraries_update_v1op.py
-  - src/mistralai/models/libraryin.py
-  - src/mistralai/models/libraryinupdate.py
-  - src/mistralai/models/libraryout.py
-  - src/mistralai/models/listdocumentout.py
-  - src/mistralai/models/listfilesout.py
-  - src/mistralai/models/listlibraryout.py
-  - src/mistralai/models/listsharingout.py
-  - src/mistralai/models/messageentries.py
-  - src/mistralai/models/messageinputcontentchunks.py
-  - src/mistralai/models/messageinputentry.py
-  - src/mistralai/models/messageoutputcontentchunks.py
-  - src/mistralai/models/messageoutputentry.py
-  - src/mistralai/models/messageoutputevent.py
-  - src/mistralai/models/metricout.py
-  - src/mistralai/models/mistralerror.py
-  - src/mistralai/models/mistralpromptmode.py
-  - src/mistralai/models/modelcapabilities.py
-  - src/mistralai/models/modelconversation.py
-  - src/mistralai/models/modellist.py
-  - src/mistralai/models/moderationobject.py
-  - src/mistralai/models/moderationresponse.py
-  - src/mistralai/models/no_response_error.py
-  - src/mistralai/models/ocrimageobject.py
-  - src/mistralai/models/ocrpagedimensions.py
-  - src/mistralai/models/ocrpageobject.py
-  - src/mistralai/models/ocrrequest.py
-  - src/mistralai/models/ocrresponse.py
-  - src/mistralai/models/ocrtableobject.py
-  - src/mistralai/models/ocrusageinfo.py
-  - src/mistralai/models/outputcontentchunks.py
-  - src/mistralai/models/paginationinfo.py
-  - src/mistralai/models/prediction.py
-  - src/mistralai/models/processingstatusout.py
-  - src/mistralai/models/referencechunk.py
-  - src/mistralai/models/requestsource.py
-  - src/mistralai/models/responsedoneevent.py
-  - src/mistralai/models/responseerrorevent.py
-  - src/mistralai/models/responseformat.py
-  - src/mistralai/models/responseformats.py
-  - src/mistralai/models/responsestartedevent.py
-  - src/mistralai/models/responsevalidationerror.py
-  - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py
-  - src/mistralai/models/retrievefileout.py
-  - src/mistralai/models/sampletype.py
-  - src/mistralai/models/sdkerror.py
-  - src/mistralai/models/security.py
-  - src/mistralai/models/shareenum.py
-  - src/mistralai/models/sharingdelete.py
-  - src/mistralai/models/sharingin.py
-  - src/mistralai/models/sharingout.py
-  - src/mistralai/models/source.py
-  - src/mistralai/models/ssetypes.py
-  - src/mistralai/models/systemmessage.py
-  - src/mistralai/models/systemmessagecontentchunks.py
-  - src/mistralai/models/textchunk.py
-  - src/mistralai/models/thinkchunk.py
-  - src/mistralai/models/timestampgranularity.py
-  - src/mistralai/models/tool.py
-  - src/mistralai/models/toolcall.py
-  - src/mistralai/models/toolchoice.py
-  - src/mistralai/models/toolchoiceenum.py
-  - src/mistralai/models/toolexecutiondeltaevent.py
-  - src/mistralai/models/toolexecutiondoneevent.py
-  - src/mistralai/models/toolexecutionentry.py
-  - src/mistralai/models/toolexecutionstartedevent.py
-  - src/mistralai/models/toolfilechunk.py
-  - src/mistralai/models/toolmessage.py
-  - src/mistralai/models/toolreferencechunk.py
-  - src/mistralai/models/tooltypes.py
-  - src/mistralai/models/trainingfile.py
-  - src/mistralai/models/transcriptionresponse.py
-  - src/mistralai/models/transcriptionsegmentchunk.py
-  - src/mistralai/models/transcriptionstreamdone.py
-  - src/mistralai/models/transcriptionstreamevents.py
-  - src/mistralai/models/transcriptionstreameventtypes.py
-  - src/mistralai/models/transcriptionstreamlanguage.py
-  - src/mistralai/models/transcriptionstreamsegmentdelta.py
-  - src/mistralai/models/transcriptionstreamtextdelta.py
-  - src/mistralai/models/unarchiveftmodelout.py
-  - src/mistralai/models/updateftmodelin.py
-  - src/mistralai/models/uploadfileout.py
-  - src/mistralai/models/usageinfo.py
-  - src/mistralai/models/usermessage.py
-  - src/mistralai/models/validationerror.py
-  - src/mistralai/models/wandbintegration.py
-  - src/mistralai/models/wandbintegrationout.py
-  - src/mistralai/models/websearchpremiumtool.py
-  - src/mistralai/models/websearchtool.py
-  - src/mistralai/models_.py
-  - src/mistralai/ocr.py
-  - src/mistralai/py.typed
-  - src/mistralai/sdk.py
-  - src/mistralai/sdkconfiguration.py
-  - src/mistralai/transcriptions.py
-  - src/mistralai/types/__init__.py
-  - src/mistralai/types/basemodel.py
-  - src/mistralai/utils/__init__.py
-  - src/mistralai/utils/annotations.py
-  - src/mistralai/utils/datetimes.py
-  - src/mistralai/utils/enums.py
-  - src/mistralai/utils/eventstreaming.py
-  - src/mistralai/utils/forms.py
-  - src/mistralai/utils/headers.py
-  - src/mistralai/utils/logger.py
-  - src/mistralai/utils/metadata.py
-  - src/mistralai/utils/queryparams.py
-  - src/mistralai/utils/requestbodies.py
-  - src/mistralai/utils/retries.py
-  - src/mistralai/utils/security.py
-  - src/mistralai/utils/serializers.py
-  - src/mistralai/utils/unmarshal_json_response.py
-  - src/mistralai/utils/url.py
-  - src/mistralai/utils/values.py
+trackedFiles:
+  .gitattributes:
+    id: 24139dae6567
+    last_write_checksum: sha1:53134de3ada576f37c22276901e1b5b6d85cd2da
+    pristine_git_object: 4d75d59008e4d8609876d263419a9dc56c8d6f3a
+  .vscode/settings.json:
+    id: 89aa447020cd
+    last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526
+    pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae
+  USAGE.md:
+    id: 3aed33ce6e6f
+    last_write_checksum: sha1:4b34a680cd5a2b2acbadc41d0b309b3f30c1dfe5
+    pristine_git_object: a31d502f33508216f686f4328cbbc8c14f8170ee
+  docs/models/agent.md:
+    id: ffdbb4c53c87
+    last_write_checksum: sha1:ec6c799658040b3c75d6ae0572bb391c6aea3fd4
+    pristine_git_object: ee054dd349848eff144d7064c319c3c8434bdc6c
+  docs/models/agentconversation.md:
+    id: 3590c1a566fa
+    last_write_checksum: sha1:a88c8e10a9de2bc99cabd38ab9fc775a2d33e9ef
+    pristine_git_object: 92fd673c0710889ae3f1d77f82c32113f39457b7
+  docs/models/agentconversationobject.md:
+    id: cfd35d9dd4f2
+    last_write_checksum: sha1:112552d4a241967cf0a7dcb981428e7e0715dc34
+    pristine_git_object: ea7cc75c5197ed42f9fb508a969baa16effe1f98
+  docs/models/agentcreationrequest.md:
+    id: 697a770fe5c0
+    last_write_checksum: sha1:c8221a20a68675b444d668a58a649b25b54786e9
+    pristine_git_object: afc27d3b688f9ca187606243c810fd19d12bb840
+  docs/models/agentcreationrequesttools.md:
+    id: 932bf99a19a8
+    last_write_checksum: sha1:49294bdd30b7413956bd8dc039ad7c9d15243282
+    pristine_git_object: c2525850649b4dad76b44fd21cac822e12986818
+  docs/models/agenthandoffdoneevent.md:
+    id: dcf166a3c3b0
+    last_write_checksum: sha1:281473cbc3929e2deb3e069e74551e7e26b4fdba
+    pristine_git_object: c0039f41825e3667cd8e91adae5bb78a2e3ac8ae
+  docs/models/agenthandoffdoneeventtype.md:
+    id: 4d412ea3af67
+    last_write_checksum: sha1:720ebe2c6029611b8ecd4caa1b5a58d6417251c6
+    pristine_git_object: c864ce4381eb30532feb010b39b991a2070f134b
+  docs/models/agenthandoffentry.md:
+    id: 39d54f489b84
+    last_write_checksum: sha1:7d949e750fd24dea20cabae340f9204d8f756008
+    pristine_git_object: 8831b0ebad1c4e857f4f4353d1815753bb13125f
+  docs/models/agenthandoffentryobject.md:
+    id: ac62dd5f1002
+    last_write_checksum: sha1:9d25ec388406e6faa765cf163e1e6dcb590ca0e9
+    pristine_git_object: 4bb876fb3c60a42cf530c932b7c60278e6036f03
+  docs/models/agenthandoffentrytype.md:
+    id: 07506fd159e0
+    last_write_checksum: sha1:27ce9bdf225fbad46230e339a5c6d96213f1df62
+    pristine_git_object: 527ebceb2ff1bbba1067f30438befd5e2c2e91d6
+  docs/models/agenthandoffstartedevent.md:
+    id: b620102af460
+    last_write_checksum: sha1:a635a7f57e197519d6c51349f6db44199f8e0d43
+    pristine_git_object: 035cd02aaf338785d9f6410fde248591c5ffa5f7
+  docs/models/agenthandoffstartedeventtype.md:
+    id: 09b09b971d58
+    last_write_checksum: sha1:a3cf06d2c414b1609bdbbbd9e35c8d3f14af262a
+    pristine_git_object: 4ffaff15cd7b5d4b08080c4fb78e92c455c73f35
+  docs/models/agentobject.md:
+    id: ed24a6d647a0
+    last_write_checksum: sha1:ff5dfde6cc19f09c83afb5b4f0f103096df6691d
+    pristine_git_object: 70e143b030d3041c7538ecdacb8f5f9f8d1b5c92
+  docs/models/agentsapiv1agentsdeleterequest.md:
+    id: 0faaaa59add9
+    last_write_checksum: sha1:2a34269e682bb910b83814b4d730ba2ce07f8cb2
+    pristine_git_object: 2799f41817ab0f7a22b49b4ff895c8308525953c
+  docs/models/agentsapiv1agentsgetrequest.md:
+    id: 01740ae62cff
+    last_write_checksum: sha1:0ed4bb58c94493e21826b38d33c2498de9150b98
+    pristine_git_object: 825e03a02e14d03ce47022df840c118de8cd921f
+  docs/models/agentsapiv1agentslistrequest.md:
+    id: c2720c209527
+    last_write_checksum: sha1:7e5cf3361dd00fce8468757cc73c7edb2877d582
+    pristine_git_object: c4f05b5c9169300d4429e601cb70d0aa1fd88c70
+  docs/models/agentsapiv1agentsupdaterequest.md:
+    id: 7692812cd677
+    last_write_checksum: sha1:8b17ce9d488b5eab892b66ca44d0e0a01b56aa11
+    pristine_git_object: f60f8e5ba0cc6923935187ba221875d757c4693e
+  docs/models/agentsapiv1agentsupdateversionrequest.md:
+    id: a001251b1624
+    last_write_checksum: sha1:0ee9e0fc55fd969f2b8f2c55dec93bf10e0e5b2f
+    pristine_git_object: e937acc9b1d3f50eee69495b1305f7aee1c960ac
+  docs/models/agentsapiv1conversationsappendrequest.md:
+    id: 70f76380e810
+    last_write_checksum: sha1:d428dc114b60362d269b5ae50a57ea60b9edee1a
+    pristine_git_object: ac8a00ecab30305de8eb8c7c08cda1b1c04148c3
+  docs/models/agentsapiv1conversationsappendstreamrequest.md:
+    id: f6ada9a592c5
+    last_write_checksum: sha1:8a806ca2e5bad75d9d0cf50726dc0d5b8e7e3eab
+    pristine_git_object: dbc330f11aa3039c9cea2dd7d477d56d5c4969d0
+  docs/models/agentsapiv1conversationsdeleterequest.md:
+    id: c2c9f084ed93
+    last_write_checksum: sha1:9ecca93f8123cebdd1f9e74cf0f4a104b46402a8
+    pristine_git_object: c6eed281331cb4d2cac4470de5e04935d22eca5a
+  docs/models/agentsapiv1conversationsgetrequest.md:
+    id: d6acce23f92c
+    last_write_checksum: sha1:b5d5529b72c16293d3d9b5c45dcb2e3798405bcf
+    pristine_git_object: 67d450c88778cb27d7d0ba06d49d9f419840b32e
+  docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md:
+    id: 97b0d4a71cbc
+    last_write_checksum: sha1:8d3df6d122eeb58043c81e30cfa701526cc572f0
+    pristine_git_object: 4bc836f353f66b0f8b24f278cc78d41dbec72e36
+  docs/models/agentsapiv1conversationshistoryrequest.md:
+    id: e3efc36ea8b5
+    last_write_checksum: sha1:4155100eaed6d3b7410b3f4476f000d1879576be
+    pristine_git_object: 7e5d39e9a11ac437a24b8c059db56527fa93f8b0
+  docs/models/agentsapiv1conversationslistrequest.md:
+    id: 406c3e92777a
+    last_write_checksum: sha1:d5c5effcf2ca32900678d20b667bdaf8ca908194
+    pristine_git_object: 62c9011faf26b3a4268186f01caf98c186e7d5b4
+  docs/models/agentsapiv1conversationsmessagesrequest.md:
+    id: 2c749c6620d4
+    last_write_checksum: sha1:781e526b030653dc189d94ca04cdc4742f9506d2
+    pristine_git_object: a91ab0466d57379eacea9d475c72db9cb228a649
+  docs/models/agentsapiv1conversationsrestartrequest.md:
+    id: 6955883f9a44
+    last_write_checksum: sha1:99c1455c7fde9b82b6940e6e1ed4f363d7c38de9
+    pristine_git_object: a18a41f5395adae3942573792c86ddf7c3812ff4
+  docs/models/agentsapiv1conversationsrestartstreamrequest.md:
+    id: 0c39856fd70e
+    last_write_checksum: sha1:d03475c088c059077049270c69be01c67a17f178
+    pristine_git_object: 7548286af5d1db51fbfd29c893eb8afdc3c97c4d
+  docs/models/agentscompletionrequest.md:
+    id: 906b82c214dc
+    last_write_checksum: sha1:60a969d5e54cbbb8e9296380908f1d31544e80e2
+    pristine_git_object: 2a0c4144fb5919e5ce892db1210bde90820c127c
+  docs/models/agentscompletionrequestmessages.md:
+    id: 152837715a56
+    last_write_checksum: sha1:338b094596f610c6eacaf0995c585f371f628f0d
+    pristine_git_object: d6a1e69106fc4b4804bfcc0f95e30782be40b363
+  docs/models/agentscompletionrequeststop.md:
+    id: ad1e0e74b6b8
+    last_write_checksum: sha1:b2422d4dada80d54b2dd499a6659a3894318d2c9
+    pristine_git_object: 21ce6fb539238168e6d1dfc5a8206d55d33018d3
+  docs/models/agentscompletionrequesttoolchoice.md:
+    id: bd8a6f9fbb47
+    last_write_checksum: sha1:f3d9ec3c82b6bbd2c3cbc320a71b927edcc292b1
+    pristine_git_object: 63b9dca9fbb8d829f93d8327a77fbc385a846c76
+  docs/models/agentscompletionstreamrequest.md:
+    id: 21d09756447b
+    last_write_checksum: sha1:97372c5a10b06f826b9da6bde2b9c5f6984cc15b
+    pristine_git_object: b2ccd4e8fe2fc3f63d4b517f7ecfc21f3aef9d67
+  docs/models/agentscompletionstreamrequestmessages.md:
+    id: d527345f99b1
+    last_write_checksum: sha1:a5e00a940960bd6751586b92329aea797af50550
+    pristine_git_object: 1bc736af55a3582a18959e445f10fc75f050476b
+  docs/models/agentscompletionstreamrequeststop.md:
+    id: 4925b6b8fbca
+    last_write_checksum: sha1:c9d0d73ca46643ffdf02e6c6cd35de5c39460c20
+    pristine_git_object: 981005f3ff2277eae57c56787edb5f1f62d1fe46
+  docs/models/agentscompletionstreamrequesttoolchoice.md:
+    id: b1f76f7a4e1c
+    last_write_checksum: sha1:843c4946d5cab61df2cba458af40835c4e8bcafe
+    pristine_git_object: 4354523a7d0d21721a96e91938b89236169ccced
+  docs/models/agenttools.md:
+    id: 493997aabfdb
+    last_write_checksum: sha1:90e3537a61b4120892a3aafe545d6bed937bf46a
+    pristine_git_object: 15891f566b3430e1f199da332f4531dd29002bed
+  docs/models/agentupdaterequest.md:
+    id: 75a7f820b906
+    last_write_checksum: sha1:d282d1cd39ecb3c447e651a9ea25010ecfa519f7
+    pristine_git_object: 641d1e406f0fba0fce9f10c16a15f883c7095c07
+  docs/models/agentupdaterequesttools.md:
+    id: a39223b88fc9
+    last_write_checksum: sha1:925ef5852c2031c9bf2608577e55edbc36708730
+    pristine_git_object: 1752ee6861d23c6abaa6b748f4ff43e9545505ec
+  docs/models/apiendpoint.md:
+    id: be613fd9b947
+    last_write_checksum: sha1:4d984c11248f7da42c949164e69b53995d5942c4
+    pristine_git_object: 8d83a26f19241da5ce626ff9526575c50e5d27be
+  docs/models/archiveftmodelout.md:
+    id: 9e855deac0d1
+    last_write_checksum: sha1:ab79a7762ca33eb1f16b3ed2e5aa5318ec398829
+    pristine_git_object: 46a9e755555480d333f91adfe840cdf09313e6c2
+  docs/models/archiveftmodeloutobject.md:
+    id: 9afeccafe5b6
+    last_write_checksum: sha1:4bf1b38dc9b6f275affaf353b4bf28bc63ef817c
+    pristine_git_object: f6f46889da24995f8e5130def3140a9fd1aff57c
+  docs/models/arguments.md:
+    id: 7ea5e33709a7
+    last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec
+    pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79
+  docs/models/assistantmessage.md:
+    id: 7e0218023943
+    last_write_checksum: sha1:e75d407349842b2de46ee3ca6250f9f51121cf38
+    pristine_git_object: 3d0bd90b4433c1a919f917f4bcf2518927cdcd50
+  docs/models/assistantmessagecontent.md:
+    id: 9f1795bbe642
+    last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643
+    pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d
+  docs/models/assistantmessagerole.md:
+    id: bb5d2a4bc72f
+    last_write_checksum: sha1:82f2c4f469426bd476c1003a91394afb89cb7c91
+    pristine_git_object: 658229e77eb6419391cf7941568164541c528387
+  docs/models/attributes.md:
+    id: ececf40457de
+    last_write_checksum: sha1:9f23adf16a682cc43346d157f7e971c596b416ef
+    pristine_git_object: 147708d9238e40e1cdb222beee15fbe8c1603050
+  docs/models/audiochunk.md:
+    id: 88315a758fd4
+    last_write_checksum: sha1:deae67e30f57eb9ae100d8c3bc26f77e8fb28396
+    pristine_git_object: c443e7ade726ba88dd7ce9a8341687ef38abe598
+  docs/models/audiochunktype.md:
+    id: cfdd0b7a74b3
+    last_write_checksum: sha1:aaafb6be2f880e23fc29958389c44fd60e85f5e4
+    pristine_git_object: 46ebf3729db50fd915e56124adcf63a09d93dbf4
+  docs/models/audiotranscriptionrequest.md:
+    id: ebf59641bc84
+    last_write_checksum: sha1:b76d6e7ee3f1a0ca96e1064db61896e287027711
+    pristine_git_object: f2e17dd35eda24a48b0c105ecce63a73d754e051
+  docs/models/audiotranscriptionrequeststream.md:
+    id: 79b5f721b753
+    last_write_checksum: sha1:e8fc60f874bb7e8ee03c4e05bdf88b2db1afbfaf
+    pristine_git_object: 975e437a299efb27c069812f424a0107999de640
+  docs/models/basemodelcard.md:
+    id: 2f62bfbd650e
+    last_write_checksum: sha1:7ee94bd9ceb6af84024863aa8183540bee7ffcce
+    pristine_git_object: 58ad5e25131804287b5f7c834afc3ad480d065a9
+  docs/models/basemodelcardtype.md:
+    id: ac404098e2ff
+    last_write_checksum: sha1:b20b34e9a5f2f52d0563d8fbfa3d00042817ce87
+    pristine_git_object: 4a40ce76799b5c224c5687287e8fc14857999d85
+  docs/models/batcherror.md:
+    id: 8053e29a3f26
+    last_write_checksum: sha1:23a12dc2e95f92a7a3691bd65a1b05012c669f0f
+    pristine_git_object: 95016cdc4c6225d23edc4436e11e4a7feacf1fe6
+  docs/models/batchjobin.md:
+    id: 10f37fc761f1
+    last_write_checksum: sha1:6eeb4663e49190ceb5e473df59b61a4d9e190ea3
+    pristine_git_object: 6fd0669676c45aed54c90df63029847d40cc9fe9
+  docs/models/batchjobout.md:
+    id: 49a98e5b2aba
+    last_write_checksum: sha1:38af8f989eb4df095bd79f6671da3b4344fb3baa
+    pristine_git_object: b66fff08f4290d0dd87c4655ff48be9a8040f1a2
+  docs/models/batchjoboutobject.md:
+    id: 8964218f4f7e
+    last_write_checksum: sha1:8fffd069c91ea950d321cd41994df78df3eb2051
+    pristine_git_object: 64ae89654c3d1a2743e67068f66fbd56f70c14b5
+  docs/models/batchjobsout.md:
+    id: d8041dee5b90
+    last_write_checksum: sha1:619fcebe753b14a34b7d3ba56f7b45c6c2690fad
+    pristine_git_object: a76cfdccf96ac2adf783417444be70c5b208582b
+  docs/models/batchjobsoutobject.md:
+    id: 885adfc869d5
+    last_write_checksum: sha1:3fdc878e360b22d1074bd61f95d7461d478d78a2
+    pristine_git_object: d4bf9f65ae546b160dd8ec5f3ecdc4228dc91bfa
+  docs/models/batchjobstatus.md:
+    id: 7e6f034d3c91
+    last_write_checksum: sha1:9e876b4b94255e1399bbb31feb51e08691bcb8fc
+    pristine_git_object: 64617b31488130f94bf47952ccaa4958670473c8
+  docs/models/builtinconnectors.md:
+    id: 9d14e972f08a
+    last_write_checksum: sha1:1f32eb515e32c58685d0bdc15de09656194c508c
+    pristine_git_object: f96f50444aaa23ca291db2fd0dc69db0d9d149d9
+  docs/models/chatclassificationrequest.md:
+    id: 57b86771c870
+    last_write_checksum: sha1:2ee5fff26c780ade7ed89617358befa93a6dfd23
+    pristine_git_object: 910d62ae20fc67e9a3200397aeab95513bfed90f
+  docs/models/chatcompletionchoice.md:
+    id: 0d15c59ab501
+    last_write_checksum: sha1:449b3e772891ec8d2ef77b6959a437514bb48d9c
+    pristine_git_object: d77d286eb0b2d2b018b6ff5f9617225be4fa9fa5
+  docs/models/chatcompletionrequest.md:
+    id: adffe90369d0
+    last_write_checksum: sha1:7dce1fcd0918e2c94ad90337fb7a89179a5b8402
+    pristine_git_object: 109fa7b13d19ccc85e4633e64b44613640c171fb
+  docs/models/chatcompletionrequesttoolchoice.md:
+    id: b97041b2f15b
+    last_write_checksum: sha1:7ad7eb133f70e07d0d6a9def36aadd08b35cf861
+    pristine_git_object: dc82a8ef91e7bfd44f1d2d9d9a4ef61b6e76cc34
+  docs/models/chatcompletionresponse.md:
+    id: 7c53b24681b9
+    last_write_checksum: sha1:a56581c0846638cfe6df26d3045fb4f874ccd931
+    pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b
+  docs/models/chatcompletionstreamrequest.md:
+    id: cf8f29558a68
+    last_write_checksum: sha1:6f3ca8df1ce48dceb72547012a3e973e09a16d61
+    pristine_git_object: 7d5fb411bde92e39910018cc2ad8d4d67ea980a1
+  docs/models/chatcompletionstreamrequestmessages.md:
+    id: b343649e1a58
+    last_write_checksum: sha1:04ea9c0e1abcc1956a5990847027bbbbcc778620
+    pristine_git_object: 479906112d167c909301c1835df549f4a6456f95
+  docs/models/chatcompletionstreamrequeststop.md:
+    id: d0e89a4dca78
+    last_write_checksum: sha1:a889e9580fa94bda7c848682d6ba501b7f5c0f41
+    pristine_git_object: a48460a92ac47fec1de2188ba46b238229736d32
+  docs/models/chatcompletionstreamrequesttoolchoice.md:
+    id: 210d5e5b1413
+    last_write_checksum: sha1:0543164caf3f4fb2bef3061dbd1a5e6b34b17ae9
+    pristine_git_object: 43f3ca3809bf1a2a040e2ad7c19a2b22db0b73f8
+  docs/models/chatmoderationrequest.md:
+    id: 22862d4d20ec
+    last_write_checksum: sha1:2fb708270756e1296a063b0d12252e7a5b2fb92a
+    pristine_git_object: 69b6c1dc2c10abbbc2574f3782b2d85687661f11
+  docs/models/chatmoderationrequestinputs.md:
+    id: 6d7386a07f09
+    last_write_checksum: sha1:f95cffb7d88cfa238a483c949af2d386f875def2
+    pristine_git_object: cf775d609e5d308ffb041deed7a70ae3f7fd70a7
+  docs/models/checkpointout.md:
+    id: 909ce66e1f65
+    last_write_checksum: sha1:89e678d55b97353ad1c3b28d9f1ab101f6be0928
+    pristine_git_object: 053592d2c57c43220bec3df27cc1486554178955
+  docs/models/classificationrequest.md:
+    id: 6f79e905a3fa
+    last_write_checksum: sha1:c1e5b1c1925a4b9602c5f717239e54688e90549d
+    pristine_git_object: 4b38c68afff1cdcfca4976de7eacb0989fc5908a
+  docs/models/classificationrequestinputs.md:
+    id: aff99510c85a
+    last_write_checksum: sha1:c4b52dd83924f56bef1f54c4fbbdf3cd62e96dbe
+    pristine_git_object: 69d75d11276f6101452a9debfa2cbcdd39333849
+  docs/models/classificationresponse.md:
+    id: 21227dec49f2
+    last_write_checksum: sha1:56756a6c0c36ce94653b676eba1f648907a87a79
+    pristine_git_object: d1633ae779850cba0eac4a9c26b5b776a7b789e0
+  docs/models/classificationtargetresult.md:
+    id: 97a5eab5eb54
+    last_write_checksum: sha1:41269d1372be3523f46cb57bd19292af4971f7c0
+    pristine_git_object: f3b10727b023dd83a207d955b3d0f3cd4b7479a1
+  docs/models/classifierdetailedjobout.md:
+    id: a2084ba5cc8c
+    last_write_checksum: sha1:75fec933eb83e28b81aa69561d7aaf0fb79b869b
+    pristine_git_object: ccc88f89ed81e6e879a88b9729c4945704370fd9
+  docs/models/classifierdetailedjoboutintegrations.md:
+    id: 3c607522e70d
+    last_write_checksum: sha1:e483390fb183bd1960373e4613a15ab31a52b7c7
+    pristine_git_object: 5a09465ece564b1bf4dd323918a20f6747019cac
+  docs/models/classifierdetailedjoboutjobtype.md:
+    id: 176bd257be82
+    last_write_checksum: sha1:ad0f41bac94d711d2b51b2ec4e09d0155db2b6eb
+    pristine_git_object: 0d1c6573b925e0ef836f5a607ac24f801e0d72eb
+  docs/models/classifierdetailedjoboutobject.md:
+    id: 1ca54621f5bf
+    last_write_checksum: sha1:5ae3d2847a66487d70bc2ff97a8c31bbbba191c7
+    pristine_git_object: 08cbcffc1c60c11c07d6e8c4724f46394f7d0854
+  docs/models/classifierdetailedjoboutstatus.md:
+    id: a98493f9d02d
+    last_write_checksum: sha1:3441d9961e9093d314dd1bc88df1743cd12866d2
+    pristine_git_object: c3118aafa8614f20c9adf331033e7822b6391752
+  docs/models/classifierftmodelout.md:
+    id: 268ac482c38b
+    last_write_checksum: sha1:77ff5ad1a9c142de2a43939be9cd3f57038a9bfc
+    pristine_git_object: dd9e8bf9c0ee291b44cd4f06146dea3d3280c143
+  docs/models/classifierftmodeloutmodeltype.md:
+    id: 40536012f45c
+    last_write_checksum: sha1:c6fde7ce8542ba6a56a91584aa0d6b1eb99fde6d
+    pristine_git_object: e1e7e465378c4c0112f08dc140052fad7955995e
+  docs/models/classifierftmodeloutobject.md:
+    id: 6aa25d9fe076
+    last_write_checksum: sha1:5a5fe345b3a2b3e65ce3171e8d6e9b9493ec7b06
+    pristine_git_object: 9fe05bcf42325a390e5c984c7bdf346668944928
+  docs/models/classifierjobout.md:
+    id: 2e3498af3f8c
+    last_write_checksum: sha1:a9706e8df1a0a569e5e42e7a1494737e391cb55a
+    pristine_git_object: aa1d3ca910535e283059903a2c39331673c1982b
+  docs/models/classifierjoboutintegrations.md:
+    id: 3c4aff0af3fd
+    last_write_checksum: sha1:b843cb1635940ff74737f92ec1ac5da893a239f2
+    pristine_git_object: d938d0b991f71e46096a9b12320c6237265bd811
+  docs/models/classifierjoboutjobtype.md:
+    id: 772280dfaefc
+    last_write_checksum: sha1:b809726c9edd5a47be7582eb028acbd58014b565
+    pristine_git_object: 7f5236fa87ea9bb5fd93873a2d2f9a6a8c4f9456
+  docs/models/classifierjoboutobject.md:
+    id: 04543f046d40
+    last_write_checksum: sha1:96863c621ddf0425b818edcd5da32ddbd5fd1194
+    pristine_git_object: 1b42d547de7bdfb109c3ff750c6754e15ec4a8c1
+  docs/models/classifierjoboutstatus.md:
+    id: 2411c6bf3297
+    last_write_checksum: sha1:6ceef218b783505231a0ec653292460e6cb1a65b
+    pristine_git_object: 4520f1648323227863f78f7f86b2b4567bb7ace7
+  docs/models/classifiertargetin.md:
+    id: 90d2da204677
+    last_write_checksum: sha1:18fca3deee476b3dd23d55a9a40ced96cdc21f83
+    pristine_git_object: 78cab67b4ced9fd0139a1dc4e6b687de870f9c62
+  docs/models/classifiertargetout.md:
+    id: 1ce5c0513022
+    last_write_checksum: sha1:2b8ed8a25b6ea6f2717cb4edcfa3f6a1ff3e69e4
+    pristine_git_object: 57535ae5cb7d30177d1800d3597fe2f6ec3ad024
+  docs/models/classifiertrainingparameters.md:
+    id: 9370e1ccd3d5
+    last_write_checksum: sha1:03f7c32717792966afdec50cb9dc1c85bb99dd84
+    pristine_git_object: 3b6f3be6942bbcf56261f773864a518d16923880
+  docs/models/classifiertrainingparametersin.md:
+    id: 8bcca130af93
+    last_write_checksum: sha1:7e9d61d3377031c740ea98d6c3dc65be99dc059b
+    pristine_git_object: 1287c973fae9762310597fbeceaef26865ace04f
+  docs/models/codeinterpretertool.md:
+    id: f009740c6e54
+    last_write_checksum: sha1:bba7c0b8f0979b0c77a31c70621dccb03d6722a5
+    pristine_git_object: d5ad789ed012accaa105ced4f8dfd8e9eb83d4a3
+  docs/models/codeinterpretertooltype.md:
+    id: d6d0f83de515
+    last_write_checksum: sha1:f41ae23451c22692410340d44bcec36a1f45910b
+    pristine_git_object: f704b65e2842e36be4d2b96c9334cda4a6b02cde
+  docs/models/completionargs.md:
+    id: 3b54534f9830
+    last_write_checksum: sha1:c0368b7c21524228939b2093ff1a4524eb57aeb7
+    pristine_git_object: 60d091374a80418892df9700dc0c21e7dad28775
+  docs/models/completionargsstop.md:
+    id: 40b0f0c81dc8
+    last_write_checksum: sha1:2a576618c62d4818af0048ed3a79080149a88642
+    pristine_git_object: b93f993e44a18fb0f3711163277f538cfedbf828
+  docs/models/completionchunk.md:
+    id: 60cb30423c60
+    last_write_checksum: sha1:61b976fe2e71236cf7941ee1635decc31bd304b2
+    pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35
+  docs/models/completiondetailedjobout.md:
+    id: 634ca7241abd
+    last_write_checksum: sha1:b0af22a4e5eb409d6aa2a91c4ee3924d38923f5f
+    pristine_git_object: 84613080715078a73204d3984e7f97477ef548ae
+  docs/models/completiondetailedjoboutintegrations.md:
+    id: ecf47529e409
+    last_write_checksum: sha1:5ff41070f932c911a724867a91a0a26c1d62032e
+    pristine_git_object: af6bbcc5f43176df2dea01a4a1a31f3c616ee3b9
+  docs/models/completiondetailedjoboutjobtype.md:
+    id: cb794f29a3f2
+    last_write_checksum: sha1:24533bc2a5bb42b560f02af4d93f008f9e5b7873
+    pristine_git_object: fb24db0cc3d9495f01732bdb0e1c3df8a5865540
+  docs/models/completiondetailedjoboutobject.md:
+    id: 8e418065aa1c
+    last_write_checksum: sha1:d429d772a6a4249809bbf0c26a6547e5f2de3f11
+    pristine_git_object: 1bec88e5f4c5f082c53157b8ee95b4b05cb787e3
+  docs/models/completiondetailedjoboutrepositories.md:
+    id: bb83e77df490
+    last_write_checksum: sha1:dc2d60c6be1d3385d584ce9629abaaaaa46cf0ef
+    pristine_git_object: 4f9727c36fac5515d0afbc801904abc3652a5b20
+  docs/models/completiondetailedjoboutstatus.md:
+    id: c606d38452e2
+    last_write_checksum: sha1:1e9a5736de32a44cf539f7eaf8214aad72ec4994
+    pristine_git_object: b80525bad8f6292892d8aee864a549c8ec52171c
+  docs/models/completionevent.md:
+    id: e57cd17cb9dc
+    last_write_checksum: sha1:4f59c67af0b11c77b80d2b9c7aca36484d2be219
+    pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d
+  docs/models/completionftmodelout.md:
+    id: 93fed66a5794
+    last_write_checksum: sha1:c66aecd2e10f79c84c057eeae1986e975cb40220
+    pristine_git_object: cd0858258521ced3990ff393fd00c11ef0abe094
+  docs/models/completionftmodeloutobject.md:
+    id: c6e5667c5f03
+    last_write_checksum: sha1:b4cbdc01a2b439d923ad542cf852797c24d234e8
+    pristine_git_object: 6f9d858caa563f4a25ae752dd40ba632ecd0af75
+  docs/models/completionjobout.md:
+    id: 77315b024171
+    last_write_checksum: sha1:bae2f49bb9064e24f886487e44ce1688993fa949
+    pristine_git_object: cb471746c4f23d2ec8451f4c45bf57e2f001072f
+  docs/models/completionjoboutobject.md:
+    id: 922a1e3a4e33
+    last_write_checksum: sha1:020211def2c4cd969398cf009b187ca19bd7a943
+    pristine_git_object: 712b107d79a8c60c4330da4f3af307545bf1a7ec
+  docs/models/completionresponsestreamchoice.md:
+    id: d56824d615a6
+    last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872
+    pristine_git_object: 1532c25b8fc065d486f52d4610a7f757e5340875
+  docs/models/completionresponsestreamchoicefinishreason.md:
+    id: 5f1fbfc90b8e
+    last_write_checksum: sha1:20824b4a223cbd3658b32440973a7d47dcd108b9
+    pristine_git_object: 0fece473297227c75db4e7ded63417a2f117cac0
+  docs/models/completiontrainingparameters.md:
+    id: b716b0195d39
+    last_write_checksum: sha1:1d8d7c469f933ea741ec15c8b9ef8b986e0ca95e
+    pristine_git_object: 4746a95df18c78331f572425a16b2b3dcbc2df4c
+  docs/models/completiontrainingparametersin.md:
+    id: 7223a57004ab
+    last_write_checksum: sha1:8f77e5fe2ce149115b0bda372c57fafa931abd90
+    pristine_git_object: 9fcc714e5f000e6134f7f03f1dd4f56956323385
+  docs/models/content.md:
+    id: bfd859c99f86
+    last_write_checksum: sha1:6673dbd19871a701955a322348a4f7e51c38ffc8
+    pristine_git_object: a833dc2c6043e36b85131c9243b4cc02b9fcc4c6
+  docs/models/contentchunk.md:
+    id: d2d3a32080cd
+    last_write_checksum: sha1:b253e4b802adb5b66d896bfc6245ac4d21a0c67c
+    pristine_git_object: cb7e51d3a6e05f197fceff4a4999594f3e340dac
+  docs/models/conversationappendrequest.md:
+    id: 722746e5065c
+    last_write_checksum: sha1:1677ab5b06748a7650464c0d7596e66e6759ede2
+    pristine_git_object: 1cdb584b62423072f9a7cdc61f045b0d161525df
+  docs/models/conversationappendrequesthandoffexecution.md:
+    id: e3f56d558037
+    last_write_checksum: sha1:dc71c8db746bb08f6630e995cf6af9fda747e954
+    pristine_git_object: 7418b36a55fab959639aec456a946600eb908efb
+  docs/models/conversationappendstreamrequest.md:
+    id: e9f8131435e8
+    last_write_checksum: sha1:559d90bbf6d64f46221edaa6482837f0ee3b0626
+    pristine_git_object: a8516ea7fc7db1d6bc0abb8f99b967a1715ceb4b
+  docs/models/conversationappendstreamrequesthandoffexecution.md:
+    id: 5739ea777905
+    last_write_checksum: sha1:c85584b63c0c5d859ee5d46d6ae167a8ee44e279
+    pristine_git_object: 1bbced3e61a521401ae93a7b1f73d0e9c061e5fd
+  docs/models/conversationevents.md:
+    id: be63cc7c526e
+    last_write_checksum: sha1:1667c767ef53fd7aef90452fde2a8245ed2b2ae6
+    pristine_git_object: f1e2c4e90181ff729d3fdb37b0135e9bbd095c04
+  docs/models/conversationeventsdata.md:
+    id: d4907b066f4b
+    last_write_checksum: sha1:f58b7f3e738c2d0146b228076a5dc0c6cf84ffb1
+    pristine_git_object: 5452d7d5ce2aa59a6d89c7b7363290e91ed8a0a3
+  docs/models/conversationhistory.md:
+    id: 7e97e8e6d6e9
+    last_write_checksum: sha1:cc6b40d6e6ff923555e959be5ef50a00c73154a7
+    pristine_git_object: ebb1d5136cebf2bc9b77047fe83feecc68532d03
+  docs/models/conversationhistoryobject.md:
+    id: 088f7df6b658
+    last_write_checksum: sha1:bcce4ef55e6e556f3c10f65e860faaedc8eb0671
+    pristine_git_object: a14e7f9c7a392f0d98e79cff9cc3ea54f30146fa
+  docs/models/conversationinputs.md:
+    id: 23e3160b457d
+    last_write_checksum: sha1:0c6abaa34575ee0eb22f12606de3eab7f4b7fbaf
+    pristine_git_object: 86db40ea1390e84c10a31155b3cde9066eac23b0
+  docs/models/conversationmessages.md:
+    id: 46684ffdf874
+    last_write_checksum: sha1:01ccdc4b509d5f46ff185f686d332587e25fc5b7
+    pristine_git_object: c3f00979b748ad83246a3824bb9be462895eafd6
+  docs/models/conversationmessagesobject.md:
+    id: b1833c3c20e4
+    last_write_checksum: sha1:bb91a6e2c89066299660375e5e18381d0df5a7ff
+    pristine_git_object: db3a441bde0d086bccda4814ddfbf737539681a6
+  docs/models/conversationrequest.md:
+    id: dd7f4d6807f2
+    last_write_checksum: sha1:4ecca434753494ff0af66952655af92293690702
+    pristine_git_object: 04378ae34c754f2ed67a34d14923c7b0d1605d4e
+  docs/models/conversationresponse.md:
+    id: 2eccf42d48af
+    last_write_checksum: sha1:69059d02d5354897d23c9d9654d38a85c7e0afc6
+    pristine_git_object: 38cdadd0055d457fa371984eabcba7782e130839
+  docs/models/conversationresponseobject.md:
+    id: 6c028b455297
+    last_write_checksum: sha1:76270a07b86b1a973b28106f2a11673d082a385b
+    pristine_git_object: bea66e5277feca4358dd6447959ca945eff2171a
+  docs/models/conversationrestartrequest.md:
+    id: 558e9daa00bd
+    last_write_checksum: sha1:97c25a370411e1bce144c61272ca8f32066112be
+    pristine_git_object: f389a1e5c42cf0f73784d5563eaa6d0b29e0d69e
+  docs/models/conversationrestartrequesthandoffexecution.md:
+    id: faee86c7832c
+    last_write_checksum: sha1:44728be55e96193e6f433e2f46f8f749f1671097
+    pristine_git_object: 5790624b82ce47ea99e5c25c825fbc25145bfb8e
+  docs/models/conversationrestartstreamrequest.md:
+    id: 01b92ab1b56d
+    last_write_checksum: sha1:90f0ab9aba1919cbc2b9cfc8e5ec9d80f8f3910c
+    pristine_git_object: d7358dc20b2b60cb287b3c4a1c174a7883871a54
+  docs/models/conversationrestartstreamrequesthandoffexecution.md:
+    id: 3e9c4a9ab94d
+    last_write_checksum: sha1:300e197f11ad5efc654b51198b75049890258eef
+    pristine_git_object: 97266b43444f5ed50eeedf574abd99cb201199fd
+  docs/models/conversationstreamrequest.md:
+    id: 833f266c4f96
+    last_write_checksum: sha1:b7196c9194bc5167d35d09774a3f26bc7d543790
+    pristine_git_object: e403db68e7932f60b1343d9282e2c110414486ce
+  docs/models/conversationstreamrequesthandoffexecution.md:
+    id: e6701e5f9f0c
+    last_write_checksum: sha1:ef2ebe8f23f27144e7403f0a522326a7e4f25f50
+    pristine_git_object: c98e194c1d204c3a5d4234f0553712a7025d7f85
+  docs/models/conversationstreamrequesttools.md:
+    id: 83ea0526da4e
+    last_write_checksum: sha1:c445fc14cbb882871a83990943569bdf09a662f9
+    pristine_git_object: 700c844876754e85428898f6cabda8fb0dedf114
+  docs/models/conversationusageinfo.md:
+    id: 57ef89d3ab83
+    last_write_checksum: sha1:d92408ad37d7261b0f83588e6216871074a50225
+    pristine_git_object: 57e260335959c605a0b9b4eaa8bf1f8272f73ae0
+  docs/models/data.md:
+    id: 9a31987caf78
+    last_write_checksum: sha1:da040f995f799c04214eff92982dd8d6c057ae93
+    pristine_git_object: 95dc8d28aa4669513ae0f255c81aadaf3d793370
+  docs/models/deletefileout.md:
+    id: c7b84242a45c
+    last_write_checksum: sha1:f2b039ab88fc83ec5dd765cab8e2ed8cce7e417d
+    pristine_git_object: 4709cc4958d008dc24430deb597f801b91c6957f
+  docs/models/deletemodelout.md:
+    id: 5643e76768d5
+    last_write_checksum: sha1:1593c64f7673e59b7ef1f4ae9f5f6b556dd6a269
+    pristine_git_object: 5fd4df7a7013dcd4f6489ad29cdc664714d32efd
+  docs/models/deletemodelv1modelsmodeliddeleterequest.md:
+    id: c838cee0f093
+    last_write_checksum: sha1:e5b6d18b4f8ab91630ae34a4f50f01e536e08d99
+    pristine_git_object: d9bc15fe393388f7d0c41abce97ead17e35e2ba4
+  docs/models/deltamessage.md:
+    id: 6c5ed6b60968
+    last_write_checksum: sha1:c213149256c620715d744c89685d5b6cbdea6f58
+    pristine_git_object: 61deabbf7e37388fdd4c1789089d120cc0b937b9
+  docs/models/document.md:
+    id: cd1d2a444370
+    last_write_checksum: sha1:d00a2ac808a0ae83a7b97da87e647ecc8dca9c52
+    pristine_git_object: 509d43b733d68d462853d9eb52fc913c855dff40
+  docs/models/documentlibrarytool.md:
+    id: 68083b0ef8f3
+    last_write_checksum: sha1:5f21be0a248ff4dedc26908b9ee0039d7ac1421c
+    pristine_git_object: 82315f32b920d32741b2e53bc10e411f74a85602
+  docs/models/documentlibrarytooltype.md:
+    id: 23c5ba5c4b3f
+    last_write_checksum: sha1:bcb58941aafaca2b8ad6e71425d5f16e881b4f97
+    pristine_git_object: ebd420f69a4ace05daa7edd82b9315b2a4354b5f
+  docs/models/documentout.md:
+    id: a69fd1f47711
+    last_write_checksum: sha1:ed446078e7194a0e44e21ab1af958d6a83597edb
+    pristine_git_object: 28df11eb1aef1fdaf3c1103b5d61549fb32ea85d
+  docs/models/documenttextcontent.md:
+    id: 29587399f346
+    last_write_checksum: sha1:93382da0228027a02501abbcf681f247814d3d68
+    pristine_git_object: 989f49e9bcb29f4127cb11df683c76993f14eba8
+  docs/models/documentupdatein.md:
+    id: 185ab27259a7
+    last_write_checksum: sha1:e0faccd04229204968dbc4e8131ee72f81288182
+    pristine_git_object: 0993886d56868aba6844824f0e0fdf1bdb9d74f6
+  docs/models/documenturlchunk.md:
+    id: 48437d297408
+    last_write_checksum: sha1:38c3e2ad5353a4632bd827f00419c5d8eb2def54
+    pristine_git_object: 6c9a5b4d9e6769be242b27ef0208f6af704689c0
+  docs/models/documenturlchunktype.md:
+    id: a3574c91f539
+    last_write_checksum: sha1:a0134fc0ea822d55b1204ee71140f2aa9d8dbe9c
+    pristine_git_object: 32e1fa9e975a3633fb49057b38b0ea0206b2d8ef
+  docs/models/embeddingdtype.md:
+    id: 22786e732e28
+    last_write_checksum: sha1:dbd16968cdecf706c890769d8d1557298f41ef71
+    pristine_git_object: 01656b0a85aa87f19909b18100bb6981f89683fc
+  docs/models/embeddingrequest.md:
+    id: bebee24421b4
+    last_write_checksum: sha1:6c5f4ecfde8ac0e28e37ea78d7237f42013f4bde
+    pristine_git_object: 0f2fc6a666833101ba35fa641657353ca75a0ad6
+  docs/models/embeddingrequestinputs.md:
+    id: 6a35f3b1910a
+    last_write_checksum: sha1:e12ca056fac504e5af06a304d09154d3ecd17919
+    pristine_git_object: 527a089b38b5cd316173ced4dc74a1429c8e4406
+  docs/models/embeddingresponse.md:
+    id: 31cd0f6b7bb5
+    last_write_checksum: sha1:1d7351c68b075aba8e91e53d29bdab3c6dd5c3a2
+    pristine_git_object: 2bd85b4d245978ec396da067060cfe892f19c64f
+  docs/models/embeddingresponsedata.md:
+    id: 89b078acdc42
+    last_write_checksum: sha1:e3e9200948f864382e0ecd3e04240b13d013141a
+    pristine_git_object: 20b50618ac99c63f7cf57fe4377840bfc1f85823
+  docs/models/encodingformat.md:
+    id: 066e154e4d43
+    last_write_checksum: sha1:8d6c4b29dea5ff7b0ae2b586951308fad99c60eb
+    pristine_git_object: 7d5941cfe6cea2e85b20d6fb0031e9b807bac471
+  docs/models/entitytype.md:
+    id: 130a2f7038b0
+    last_write_checksum: sha1:01c3c10e737bcd58be70b437f7ee74632972a983
+    pristine_git_object: 7c040b382d4c1b6bc63f582566d938be75a5f954
+  docs/models/entries.md:
+    id: 93dc7a28346c
+    last_write_checksum: sha1:c6c61c922df17562e9ca5d8d2d325579db5c88bc
+    pristine_git_object: 8e5a20d052c47008b8a399b7fb740bece3b35386
+  docs/models/eventout.md:
+    id: 9960732c3718
+    last_write_checksum: sha1:dbc23814b2e54ded4aa014d63510b3a2a3259329
+    pristine_git_object: d9202353be984d51b9c05fb0f490053ce6ccfe4a
+  docs/models/file.md:
+    id: 4ad31355bd1c
+    last_write_checksum: sha1:ade4d3c908c664a07a3c333cc24bc1bfb43ab88b
+    pristine_git_object: 37cc418f9e5189c18f312c42060fd702e2963765
+  docs/models/filechunk.md:
+    id: edc076728e9d
+    last_write_checksum: sha1:07ab5db503211adba2fa099e66d12ac3c4bbf680
+    pristine_git_object: 18217114060ac4e4b45fefabace4628684f27e5c
+  docs/models/filepurpose.md:
+    id: ed6216584490
+    last_write_checksum: sha1:02767595f85228f7bfcf359f8384b8263580d53a
+    pristine_git_object: 14cab13ee191ae60e2c5e1e336d0a5abc13f778b
+  docs/models/filesapiroutesdeletefilerequest.md:
+    id: 7fdf9a97320b
+    last_write_checksum: sha1:411e38d0e08a499049796d1557f79d669fc65107
+    pristine_git_object: 1b02c2dbb7b3ced86ddb49c2323d1d88732b480c
+  docs/models/filesapiroutesdownloadfilerequest.md:
+    id: b9c13bb26345
+    last_write_checksum: sha1:1f41dad5ba9bd63881de04d24ef49a0650d30421
+    pristine_git_object: 8b28cb0e5c60ac9676656624eb3c2c6fdc8a3e88
+  docs/models/filesapiroutesgetsignedurlrequest.md:
+    id: 08f3772db370
+    last_write_checksum: sha1:26aa0140444ccef7307ef6f236932032e4784e8f
+    pristine_git_object: dbe3c801003c7bb8616f0c5be2dac2ab1e7e9fb1
+  docs/models/filesapirouteslistfilesrequest.md:
+    id: 04bdf7c654bd
+    last_write_checksum: sha1:258317fd5c0738cff883f31e13393ac64f817a6f
+    pristine_git_object: 3801a96e19f149a665bde4890e26df54d7f07d77
+  docs/models/filesapiroutesretrievefilerequest.md:
+    id: 2783bfd9c4b9
+    last_write_checksum: sha1:a1249ef0aedb3056e613078488832c96b91f8cab
+    pristine_git_object: 961bae1f51a4ae9df21b28fd7a5ca91dc7b3888b
+  docs/models/filesapiroutesuploadfilemultipartbodyparams.md:
+    id: 558bf53c7b65
+    last_write_checksum: sha1:de3f26e8bd89aae0e2c2078b9e1f7f47adccafbd
+    pristine_git_object: a5dd1174ab987e511d70a0f8fdaefbeaeda18c43
+  docs/models/fileschema.md:
+    id: 9a05a660399d
+    last_write_checksum: sha1:97987d64285ff3092635754c78ad7b68d863e197
+    pristine_git_object: 4f3e72dba17a964155007755ad9d69f0304b2adb
+
docs/models/filesignedurl.md: + id: c0a57176d62e + last_write_checksum: sha1:2c64ef5abc75e617496f0a28d3e1cebfe269a6b9 + pristine_git_object: 52ce3f4f0c44df0ef3ed1918f92ad63f76ffc144 + docs/models/fimcompletionrequest.md: + id: b44677ecc293 + last_write_checksum: sha1:24bcb54d39b3fabd487549a27b4c0a65dd5ffe50 + pristine_git_object: fde0b625c29340e8dce1eb3026ce644b1885e53a + docs/models/fimcompletionrequeststop.md: + id: ea5475297a83 + last_write_checksum: sha1:a6cdb4bda01ac58016a71f35da48a5d10df11623 + pristine_git_object: a0dbb00a82a03acc8b62b81d7597722a6ca46118 + docs/models/fimcompletionresponse.md: + id: 050d62ba2fac + last_write_checksum: sha1:a6101a69e83b7a5bcf96ec77ba1cab8748f734f4 + pristine_git_object: cd62d0349503fd8b13582d0ba47ab9cff40f6b28 + docs/models/fimcompletionstreamrequest.md: + id: c881d7e27637 + last_write_checksum: sha1:f8755bc554dd44568c42eb5b6dde04db464647ab + pristine_git_object: ba62d854f030390418597cbd8febae0e1ce27ea8 + docs/models/fimcompletionstreamrequeststop.md: + id: c97a11b764e9 + last_write_checksum: sha1:958d5087050fdeb128745884ebcf565b4fdc3886 + pristine_git_object: 5a9e2ff020d4939f7fd42c0673ea7bdd16cca99d + docs/models/finetuneablemodeltype.md: + id: e16926b57814 + last_write_checksum: sha1:52006811b756ff5af865ed6f74838d0903f0ee52 + pristine_git_object: 34b24bd4db1ad3f9e77e2c6a45a41d2fbc5cf7fd + docs/models/finishreason.md: + id: 73315c2a39b3 + last_write_checksum: sha1:dc258e82af5babd6efabadb20cd6e2f9663dbb64 + pristine_git_object: 2af53f6e55b74455a696c17ab00ba626a1c3711f + docs/models/format_.md: + id: a17c22228eda + last_write_checksum: sha1:dad6de59fec6378d50356007602e2a0254d8d2e4 + pristine_git_object: 97d286a4ed7cff0a4058bbfa06c4573428182876 + docs/models/ftclassifierlossfunction.md: + id: b546cfde5aa6 + last_write_checksum: sha1:752d9d238a90a3ef55205576fa38cee56ea1539e + pristine_git_object: 919cdd384315c99d4b590bc562298403733344ce + docs/models/ftmodelcapabilitiesout.md: + id: f7be0dd1d889 + last_write_checksum: sha1:670412a0c0268f646dd444537bd79ce9440170c8 + pristine_git_object: 19690476c64ac7be53f974347c1618730f0013ce + docs/models/ftmodelcard.md: + id: 15ed6f94deea + last_write_checksum: sha1:2dccc70020274152bb8a76f0f7699694f8683652 + pristine_git_object: 35032775db8ae6f4c6fbac309edacd27ee7868af + docs/models/ftmodelcardtype.md: + id: e2ba85c02d1c + last_write_checksum: sha1:f6a718013be6a8cb340f58f1ff7b919217594622 + pristine_git_object: 0b38470b9222df6c51baef2e7e9e10c0156a2e05 + docs/models/function.md: + id: 416a80fba031 + last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511 + pristine_git_object: b2bdb3fe82520ea79d0cf1a10ee41c844f90b859 + docs/models/functioncall.md: + id: a78cd1d7f605 + last_write_checksum: sha1:65bf78744b8531cdefb6a288f1af5cbf9d9e2395 + pristine_git_object: 7ccd90dca4868db9b6e178712f95d375210013c8 + docs/models/functioncallentry.md: + id: 016986b7d6b0 + last_write_checksum: sha1:bd3e67aea9eb4f70064e67e00385966d44f73f24 + pristine_git_object: fd3aa5c575019d08db258842262e8814e57dc6d5 + docs/models/functioncallentryarguments.md: + id: c4c609e52680 + last_write_checksum: sha1:ae88aa697e33d60f351a30052aa3d6e2a8a3e188 + pristine_git_object: f1f6e39e724673556a57059a4dbda24f31a4d4b9 + docs/models/functioncallentryobject.md: + id: ea634770754e + last_write_checksum: sha1:d6bc885e9689397d4801b76c1a3c8751a75cf212 + pristine_git_object: 3cf2e427bfb6f2bc7acea1e0c6aafe965187f63f + docs/models/functioncallentrytype.md: + id: b99da15c307b + last_write_checksum: sha1:04665a6718ad5990b3beda7316d55120fbe471b0 + 
pristine_git_object: 7ea34c5206bdf205d74d8d49c87ddee5607582e9 + docs/models/functioncallevent.md: + id: cc9f2e603464 + last_write_checksum: sha1:c3a6a7ce8af38d7ba7a2ece48c352eed95edc578 + pristine_git_object: c25679a5d89745c1e186cdeb72fda490b2f45af2 + docs/models/functioncalleventtype.md: + id: 1aab7a86c5d6 + last_write_checksum: sha1:61d480f424df9a74a615be673cae4dcaf7875d81 + pristine_git_object: 8cf3f03866d72ac710015eec57d6b9caa079022e + docs/models/functionname.md: + id: 4b3bd62c0f26 + last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb + pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 + docs/models/functionresultentry.md: + id: 24d4cb18998c + last_write_checksum: sha1:528cae03e09e43bdf13e1a3fef64fd9ed334319b + pristine_git_object: 6df54d3d15e6d4a03e9af47335829f01a2226108 + docs/models/functionresultentryobject.md: + id: 025dc546525c + last_write_checksum: sha1:01a0085fb99253582383dd3b12a14d19c803c33c + pristine_git_object: fe52e0a5a848ea09dfb4913dd8d2e9f988f29de7 + docs/models/functionresultentrytype.md: + id: 69651967bdee + last_write_checksum: sha1:41489b0f727a00d86b313b8aefec85b4c30c7602 + pristine_git_object: 35c94d8e553e1cb641bef28fec2d8b3576d142f6 + docs/models/functiontool.md: + id: 5fb499088cdf + last_write_checksum: sha1:f616c6de97a6e0d622b16b99f95c2c5a94661789 + pristine_git_object: 8c42459304100777cf85416a5c3a984bc0e7a7ca + docs/models/functiontooltype.md: + id: bc0bcbe69ad9 + last_write_checksum: sha1:c0fae17a8e5a9b7240ff16af7eef9fb4782fe983 + pristine_git_object: 9c095625b60f1e2e0fd09b08e3ba315545d6a036 + docs/models/githubrepositoryin.md: + id: b42209ef8423 + last_write_checksum: sha1:fece86cdee3ba3a5719244a953193ed2f7b982f7 + pristine_git_object: 1584152ba934756793d5228d5691c07d3256c7b8 + docs/models/githubrepositoryintype.md: + id: e2f2ca622221 + last_write_checksum: sha1:349dc9c6e4db5ec5394c8649c3b872db3545c182 + pristine_git_object: 63da967cb7a75ec328f9b9fbd1062e43f2cabc07 + docs/models/githubrepositoryout.md: + id: 0ca86e122722 + last_write_checksum: sha1:f6ffda992af75d3f95751106db1b0f0c82a2eca7 + pristine_git_object: 03f0b2661e46b48489ede1208d9c38c4324b2b35 + docs/models/githubrepositoryouttype.md: + id: f3ab58fa1b0e + last_write_checksum: sha1:8f26cd692f499279b9c4182010d56c75374ed9ec + pristine_git_object: 46c3eefd1d67ea6968a3c7025e6dc27e8f0f1ac5 + docs/models/handoffexecution.md: + id: d0b2e094fa39 + last_write_checksum: sha1:1d8fafc8105b6c15e50620353c0457b629951804 + pristine_git_object: 61e7dade49090096a49d99b5c8291f629fd43c4e + docs/models/httpvalidationerror.md: + id: a211c095f2ac + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/models/hyperparameters.md: + id: c167bad5b302 + last_write_checksum: sha1:5b7f76360dea58be5350bbe074482da45e57599c + pristine_git_object: 46a6dd6baa1b1574bad5eadc1e83d4b72d56c0c8 + docs/models/imagegenerationtool.md: + id: d5deb6b06d28 + last_write_checksum: sha1:8596d0119712e68b1deafd18860ed6ed452a31fa + pristine_git_object: b8fc9cf40c8cb010231837ffe3d66cb3762dd666 + docs/models/imagegenerationtooltype.md: + id: fc670aabaff7 + last_write_checksum: sha1:234109f99f467905e6e7b74036e2c395090840e4 + pristine_git_object: 29681b58e1afe945faa76f9dd424deb01cdfb1bd + docs/models/imageurl.md: + id: e75dd23cec1d + last_write_checksum: sha1:30131c77dd240c3bae48d9693698358e5cc0ae63 + pristine_git_object: 7c2bcbc36e99c3cf467d213d6a6a59d6300433d8 + docs/models/imageurlchunk.md: + id: 4407097bfff3 + last_write_checksum: 
sha1:7a478fd638234ece78770c7fc5e8d0adaf1c3727 + pristine_git_object: f1b926ef8e82443aa1446b1c64c2f02e33d7c789 + docs/models/imageurlchunkimageurl.md: + id: c7fae88454ce + last_write_checksum: sha1:5eff71b7a8be7baacb9ba8ca0be0a0f7a391a325 + pristine_git_object: 767389082d25f06e617fec2ef0134dd9fb2d4064 + docs/models/imageurlchunktype.md: + id: b9af2db9ff60 + last_write_checksum: sha1:990546f94648a09faf9d3ae55d7f6ee66de13e85 + pristine_git_object: 2064a0b405870313bd4b802a3b1988418ce8439e + docs/models/inputentries.md: + id: a5c647d5ad90 + last_write_checksum: sha1:4231bb97837bdcff4515ae1b00ff5e7712256e53 + pristine_git_object: b44a467d258cfa8cc3d2a3236330471dbc3af109 + docs/models/inputs.md: + id: 4b0a7fb87af8 + last_write_checksum: sha1:19d8da9624030a47a3285276c5893a0fc7609435 + pristine_git_object: 0f62a7ce8e965d0879507e98f808b9eb254282a6 + docs/models/instructrequest.md: + id: a0034d7349a2 + last_write_checksum: sha1:91c446be8428efd44163ed8366a37c376554211a + pristine_git_object: 9500cb588b5d27d934b04cc5fa0be26a270f6d82 + docs/models/instructrequestinputs.md: + id: 2a677880e32a + last_write_checksum: sha1:1b989ef7ef4c84f59c83af11b3243d934c85e348 + pristine_git_object: 4caa028f85be2324966e61321c917cbd0c65de01 + docs/models/instructrequestinputsmessages.md: + id: c0cb1f866e69 + last_write_checksum: sha1:558f78fafbd44c5ea7030491a39d0c7ccd994d01 + pristine_git_object: 237e131f1b1161c8b90df11d49739f5bfe9ee829 + docs/models/instructrequestmessages.md: + id: 639538e7d70d + last_write_checksum: sha1:8c26b3b97f095e5c525b0e3c18d45aded9bd03a2 + pristine_git_object: 9c866a7db86b40e997cb3f06d68e67eb033f3360 + docs/models/integrations.md: + id: f9eb2b4df2f8 + last_write_checksum: sha1:e0b12cf5661d4e6332da28913c5394e5a85071bf + pristine_git_object: 35214d63ef2b902aa39bfdd2fd6dc5f319cc203b + docs/models/jobin.md: + id: 1b7b37214fa8 + last_write_checksum: sha1:6dadb7d78e2dc04966bd041ddb54428108098f76 + pristine_git_object: b96517705cea7b9efd266f146080ad1aed3cc8cb + docs/models/jobinintegrations.md: + id: 5f293420eced + last_write_checksum: sha1:288931c5427e1a435b1396e131e95a43cbcbc2b9 + pristine_git_object: 91c102426d05b4f88ca5a661f53f1acf316b5b88 + docs/models/jobinrepositories.md: + id: 5c94c2d28ce8 + last_write_checksum: sha1:e7fbe667fa5703dedd78672d936f1b02caf301b5 + pristine_git_object: b94477af4c51c7939fd6dcdb75cbc56459d4a30a + docs/models/jobmetadataout.md: + id: 30eb634fe247 + last_write_checksum: sha1:46d54b6f6004a6e571afd5207db5170dfbce7081 + pristine_git_object: 6218a161b71abbb35eb4ca6e3ce664226983efc2 + docs/models/jobsapiroutesbatchcancelbatchjobrequest.md: + id: 798cb1ca1385 + last_write_checksum: sha1:67e8bda117608aee0e09a702a1ef8a4b03c40b68 + pristine_git_object: c19d0241784ff69bc68a11f405437400057d6f62 + docs/models/jobsapiroutesbatchgetbatchjobrequest.md: + id: e83a7ec84f8a + last_write_checksum: sha1:cd1ee554b9cfd1f83ecacff1bebee8abb6a62370 + pristine_git_object: 3930aacd4130f27dd680a05964b42a08aa5c55ff + docs/models/jobsapiroutesbatchgetbatchjobsrequest.md: + id: 5b9c44ad4d31 + last_write_checksum: sha1:8e28b08c86355b097836e55559fda85487000092 + pristine_git_object: b062b8731ca7c99af968be2e65cca6aa5f122b37 + docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md: + id: 8eb8c127091e + last_write_checksum: sha1:2b93a6bed5743461bb03c8337fb25dfc5a15522e + pristine_git_object: f9700df50b8f512c4139c1830aba18989d022b8e + docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md: + id: deff83b39b78 + last_write_checksum: sha1:dac8d8f2e95aed2db9b46711e6e80816881d5d14 + 
pristine_git_object: 883cbac685563d2e0959b63638f6b967ebdf1ee9 + docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md: + id: c45757ba1ed9 + last_write_checksum: sha1:52d4f945aff24c03627111d0e7c73cbbba60129f + pristine_git_object: 1b331662b17cd24c22e88b01bf00d042cb658516 + docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md: + id: 8aa8030f26d7 + last_write_checksum: sha1:ebc6ac03e99d69fed1bae6cb4e858e0aecf2dd88 + pristine_git_object: eeddc3cdfdd975cdb69fbfcd306e9445010eb82f + docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md: + id: a9b75762e534 + last_write_checksum: sha1:8f1395447928e089c88dce8c0ced1030ec5f0eba + pristine_git_object: fde19800303a901149bf39c5330ef8c4da87df62 + docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md: + id: c0b31f4fc621 + last_write_checksum: sha1:6f70f5cabb62e2df7c1e4086f7a8b100143cc2aa + pristine_git_object: e0d2e3610ce460d834c2d07d9a34b09f8257217b + docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md: + id: 52078f097503 + last_write_checksum: sha1:5d8fe21d292264209508ae484a7e88d33bff373f + pristine_git_object: 3dca3cd85245e0956b557fc5d6ae6c5e265df38d + docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md: + id: b4e2b814d8c3 + last_write_checksum: sha1:f13b5c8f2e74cc73b58a30d366032c764603f95e + pristine_git_object: 4429fe480ab9486de98940a119ac63f40045313b + docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md: + id: cfd848845787 + last_write_checksum: sha1:b3a64f467ab1c16427ef77d3acb0749ab155e213 + pristine_git_object: 64f4cca608f8e505f9eeaac623955200dd5b9553 + docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md: + id: 75b5dd1bcbaa + last_write_checksum: sha1:dd30e7ff8748d26497458f3398c0547113dc058f + pristine_git_object: 95c1734daa7164bedeeb1fa58dd792939f25bc17 + docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md: + id: 60bd2e28993a + last_write_checksum: sha1:7ff770c3d0148a4818957b279875bbe5b1ecfc62 + pristine_git_object: 6d93832e68739e465de7c61993b8bcfa1468bafc + docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md: + id: c265a30fd4cf + last_write_checksum: sha1:e1a739e755b4e573f592743cd34116da97a67450 + pristine_git_object: 54f4c3981978e1ac4bdf42d5b746b73a62d13162 + docs/models/jobsout.md: + id: cbe31f43047d + last_write_checksum: sha1:73e1ce0ff11741c22dc00d768055ad603034147c + pristine_git_object: 977013f7a679dd89fb48c4a95b266a9ea5f3f7cf + docs/models/jobsoutdata.md: + id: 809574cac86a + last_write_checksum: sha1:06455044d314c4edbd1ce4833d551c10918f0a3e + pristine_git_object: 28cec31117416b79eb8688d84b47b157974574cc + docs/models/jobsoutobject.md: + id: 1c99619e2435 + last_write_checksum: sha1:cffbcfb8673e12feb8e22fd397bf68c8745c76bb + pristine_git_object: f6c8a2c3079003a885ee9bdfc73cf7c7c7d8eded + docs/models/jobtype.md: + id: 86685dbc7863 + last_write_checksum: sha1:da927d34a69b0b2569314cc7a62733ee1ab85186 + pristine_git_object: 847c662259537ed54cc108e8de8d8eb93defbe58 + docs/models/jsonschema.md: + id: a6b15ed6fac8 + last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f + pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269 + docs/models/legacyjobmetadataout.md: + id: b3b8c262f61a + last_write_checksum: sha1:bc611bf233bd5b224b1367c6b800de6c3b589b38 + pristine_git_object: 53a45485b70017e729709359407d6c9f3e0fbe35 + docs/models/legacyjobmetadataoutobject.md: + id: 5bafaafb6137 + last_write_checksum: sha1:30e5942a6d0c9fde35d29cd9d87a4304b0e4fa26 + pristine_git_object: 9873ada894f79647c05e386521c6b4208d740524 
+ docs/models/librariesdeletev1request.md: + id: c0c3b2e1aabc + last_write_checksum: sha1:bef84f8851b06d2d914b605f11109de1850d0294 + pristine_git_object: 68d7e54369ce75422bf8b0ff16cada1c0ae2b05c + docs/models/librariesdocumentsdeletev1request.md: + id: 9d557bd7d1cc + last_write_checksum: sha1:1b580b657559356886915ee5579b90a03db19337 + pristine_git_object: efccdb1bbc36cf644ed2d1716cbd202e6d6bf6c5 + docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md: + id: 27ad38ce4cb1 + last_write_checksum: sha1:b35ad610330232b395b5f87cc15f6ae270de6816 + pristine_git_object: 14ca66f72693f1df05eb93e0cca45f440b62d282 + docs/models/librariesdocumentsgetsignedurlv1request.md: + id: 4498715b6cfb + last_write_checksum: sha1:31f78079e31e070d080c99555cd2d85318fc4610 + pristine_git_object: 7c08c180d59a8e8475fea89424b8b2021d51385f + docs/models/librariesdocumentsgetstatusv1request.md: + id: c2219d3a3738 + last_write_checksum: sha1:44e79df94cf2686e83d7a2e793140a6a7b3a1c05 + pristine_git_object: e6d41875966348fd9e770d06c8099e48f0e64b5d + docs/models/librariesdocumentsgettextcontentv1request.md: + id: 850dfa465952 + last_write_checksum: sha1:4a1212e111525f4265d2924ce52f9c13d2787d4d + pristine_git_object: 2f58a4460ccdad531391318c62191e76c1ec22ac + docs/models/librariesdocumentsgetv1request.md: + id: cdd0df2f7e9d + last_write_checksum: sha1:36e5ef39552159044ecd28d20ee0792ea5bcadef + pristine_git_object: 6febc058425bb38857c391ee4c40d600858e6058 + docs/models/librariesdocumentslistv1request.md: + id: 7b5756e50d64 + last_write_checksum: sha1:2605b7972a3d7b4f73ab8052be4bf740f44f6f6f + pristine_git_object: 44f6300115853053214639982516a60b3268e778 + docs/models/librariesdocumentsreprocessv1request.md: + id: 1b8bf57b3f0a + last_write_checksum: sha1:8528785c1b4ae18d6ec6f261d29d5daac0d420a3 + pristine_git_object: 196ba17b749ce9efc1c30189864e474896814f85 + docs/models/librariesdocumentsupdatev1request.md: + id: b9147b1c0e38 + last_write_checksum: sha1:45b2cc114886b300e3b996a8b71241ac5c7260a3 + pristine_git_object: 2f18b014af4577a0ae862dfeea599d5f700005cb + docs/models/librariesdocumentsuploadv1documentupload.md: + id: c76458963b1c + last_write_checksum: sha1:6973cb619a8e50bb12e96cffdc6b57fcf7add000 + pristine_git_object: a0ba95da33a248fd639ca1af5f443fd043dae0ea + docs/models/librariesdocumentsuploadv1request.md: + id: 89a89d889c72 + last_write_checksum: sha1:4f67f0bc5b2accb6dcf31ce7be0e9447ab4da811 + pristine_git_object: 7c91ca9b92839be8ab1efb4428cc8d7a78d57e1e + docs/models/librariesgetv1request.md: + id: f47ad71ec7ca + last_write_checksum: sha1:3b2bf1e4f6069d0c954e1ebf95b575a32c4adeac + pristine_git_object: 6e1e04c39c15a85d96710f8d3a8ed11a22412816 + docs/models/librariessharecreatev1request.md: + id: 99e7bb8f7fed + last_write_checksum: sha1:e40d710ad1023768a0574b3283ef35544f6b0088 + pristine_git_object: 4c05241de4ee5a76df335ae9ea71004bd02b8669 + docs/models/librariessharedeletev1request.md: + id: bc8adba83f39 + last_write_checksum: sha1:79fc5a9a3cee5b060f29edd95f00e0fea32579cf + pristine_git_object: 850e22ab79863ba544f453138322c0eb5bf544cd + docs/models/librariessharelistv1request.md: + id: 86e6f08565e2 + last_write_checksum: sha1:6f2ffff66fa5fb141d930bca7bb56e978d62b4a5 + pristine_git_object: 98bf6d17ab013c1dd3f0ab18c37bbfc1a63f1b76 + docs/models/librariesupdatev1request.md: + id: f7e51b528406 + last_write_checksum: sha1:cec4aa232c78ca2bd862aee3d5fb3bcc2ad9dc05 + pristine_git_object: a68ef7a8f52ee4a606cb88d0a3f96de8c2fbccb8 + docs/models/libraryin.md: + id: a08170e6397c + last_write_checksum: 
sha1:2c996ecf1ae5d9e8df702a79741b72b3571eb6ef + pristine_git_object: d6b119148725627bcf76594c4a24e915399cd8f8 + docs/models/libraryinupdate.md: + id: 6d06b6b21498 + last_write_checksum: sha1:4ec01d7f7e24f58a74613d4847725bfd516b7d7f + pristine_git_object: 4aa169c7669c00fcedc423fbff6f386697360787 + docs/models/libraryout.md: + id: 2e8b6d91ded2 + last_write_checksum: sha1:d71053b44725147265871be445217e3e1a0e5ede + pristine_git_object: ebf46d57de6bad7022a3e8cb8eaf88728bbbe888 + docs/models/listdocumentout.md: + id: 4bec19e96c34 + last_write_checksum: sha1:c0b3a6e3841f120c52b1d7718d7226a52fe1b6d6 + pristine_git_object: f14157b8db55c1201d9f7151742e9ddf0d191c16 + docs/models/listfilesout.md: + id: 98d4c59cc07e + last_write_checksum: sha1:e76df31628984095f1123005009ddc4b59b1c2bc + pristine_git_object: bcb1f13aa17f41dadb6af37541e929364e2d6cec + docs/models/listlibraryout.md: + id: ea34f8548bd6 + last_write_checksum: sha1:cec920357bc48bea286c05d16c480a9a9369b459 + pristine_git_object: db76ffa10eb97f143ad4a6930e520e389fe18153 + docs/models/listsharingout.md: + id: a3249129f37e + last_write_checksum: sha1:4831e4f02e1d5e86f138c7bb6b04d095aa4df30f + pristine_git_object: bcac4834f3bd008868435189f40bbf9e368da0d2 + docs/models/loc.md: + id: b071d5a509cc + last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 + pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 + docs/models/messageentries.md: + id: 9af3a27b862b + last_write_checksum: sha1:a3eb6e37b780644313738f84e6c5ac653b4686bc + pristine_git_object: 76256fb913376a15d5bcd2531b18f1a78b980c9d + docs/models/messageinputcontentchunks.md: + id: 34aac9c271db + last_write_checksum: sha1:641cd1dba3721f85b049c5ee514879f067483949 + pristine_git_object: 4fd18a0dcb4f6af4a9c3956116f8958dc2fa78d1 + docs/models/messageinputentry.md: + id: eb74af2b9341 + last_write_checksum: sha1:a65737ba7d9592ff91b42689c5c98fca8060d868 + pristine_git_object: d55eb8769c3963518fcbc910d2e1398b6f46fd87 + docs/models/messageinputentrycontent.md: + id: 7e12c6be6913 + last_write_checksum: sha1:6be8be0ebea2b93712ff6273c776ed3c6bc40f9a + pristine_git_object: 65e55d97606cf6f3119b7b297074587e88d3d01e + docs/models/messageinputentryrole.md: + id: 2497d07a793d + last_write_checksum: sha1:a41eb58f853f25489d8c00f7a9595f443dcca2e6 + pristine_git_object: f2fdc71d8bc818b18209cd1834d4fead4dfd3ba6 + docs/models/messageinputentrytype.md: + id: 5d2a466dad0f + last_write_checksum: sha1:19f689ffdd647f3ddc747daf6cb0b4e811dfdcee + pristine_git_object: d3378124db83c92174e28fe36907263e2cbe6938 + docs/models/messageoutputcontentchunks.md: + id: 802048198dc0 + last_write_checksum: sha1:d70a638af21ee46126aa0434bf2d66c8dd8e43ff + pristine_git_object: d9c3d50e295b50618f106ef5f6b40929a28164df + docs/models/messageoutputentry.md: + id: f969119c8134 + last_write_checksum: sha1:cf5032929394584a31b3f12f55dfce6f665f71c7 + pristine_git_object: 5b42e20d1b03263f3d4d9f5cefe6c8d49c984e01 + docs/models/messageoutputentrycontent.md: + id: 44019e6e5698 + last_write_checksum: sha1:d0cc7a8ebe649614c8763aaadbf03624bb9e47e3 + pristine_git_object: 5206e4eb0d95e10b46c91f9f26ae00407d2dd337 + docs/models/messageoutputentryobject.md: + id: b3a7567581df + last_write_checksum: sha1:46528a6f87408c6113d689f2243eddf84bcbc55f + pristine_git_object: bb254c82737007516398287ff7878406866dceeb + docs/models/messageoutputentryrole.md: + id: bf7aafcdddab + last_write_checksum: sha1:e28643b6183866b2759401f7ebf849d4848abb10 + pristine_git_object: 783ee0aae4625f7b6e2ca701ac8fcdddcfe0e412 + docs/models/messageoutputentrytype.md: + id: 
960cecf5fde3 + last_write_checksum: sha1:b6e52e971b6eb69582162a7d96979cacff6f5a9c + pristine_git_object: cb4a7a1b15d44a465dbfbd7fe319b8dbc0b62406 + docs/models/messageoutputevent.md: + id: b690693fa806 + last_write_checksum: sha1:8a87ff6b624d133bcea36729fb1b1a1a88b3eaf0 + pristine_git_object: 92c1c61587e34f6e143263e35c33acc9332870d6 + docs/models/messageoutputeventcontent.md: + id: cecea075d823 + last_write_checksum: sha1:16dac25382642cf2614e24cb8dcef6538be34914 + pristine_git_object: 16d8d52f6ff9f43798a94e96c5219314731ab5fb + docs/models/messageoutputeventrole.md: + id: 87d07815e9be + last_write_checksum: sha1:a6db79edc1bf2d7d0f4762653c8d7860cb86e300 + pristine_git_object: e38c6472e577e0f1686e22dc61d589fdb2928434 + docs/models/messageoutputeventtype.md: + id: 13c082072934 + last_write_checksum: sha1:03c07b7a6046e138b9b7c02084727785f05a5a67 + pristine_git_object: 1f43fdcce5a8cfe4d781b4a6faa4a265975ae817 + docs/models/messages.md: + id: 2103cd675c2f + last_write_checksum: sha1:f6940c9c67b98c49ae2bc2764f6c14178321f244 + pristine_git_object: 1d394500e8ffdd140457575568fc2ce465a1cc3a + docs/models/metricout.md: + id: 7c6ff0ad95f9 + last_write_checksum: sha1:eef34dc522a351e23d7371c00a07662a0711ea73 + pristine_git_object: 3c552bac2fa3a5a3783db994d47d255a94643110 + docs/models/mistralpromptmode.md: + id: d17d5db4d3b6 + last_write_checksum: sha1:5ccd31d3804f70b6abb0e5a00bda57b9102225e3 + pristine_git_object: 7416e2037c507d19ac02aed914da1208a2fed0a1 + docs/models/modelcapabilities.md: + id: 283fbc5fa32f + last_write_checksum: sha1:69312b751771ae8ffa0d1452e3c6c545fdbf52b7 + pristine_git_object: 646c8e94fd208cbf01df19ad6c9707ad235bc59b + docs/models/modelconversation.md: + id: 497521ee9bd6 + last_write_checksum: sha1:bd11f51f1b6fedbf8a1e1973889d1961086c164f + pristine_git_object: 1a03ef7d1dd9e1d6b51f0f9391c46feb5cd822a8 + docs/models/modelconversationobject.md: + id: 4c5699d157a9 + last_write_checksum: sha1:8e2e82e1fa4cb97f8c7a8a129b3cc9cd651e4055 + pristine_git_object: ead1fa26f5d9641a198a14b43a0f5689456e5821 + docs/models/modelconversationtools.md: + id: b3463ae729a7 + last_write_checksum: sha1:eb78650e337ab5354a0cdfbfcf975ed02495230b + pristine_git_object: 5cc97437c34263ad650c84c8702e158ee74ecfb1 + docs/models/modellist.md: + id: ce07fd9ce413 + last_write_checksum: sha1:4f2956eeba39cc14f2289f24990e85b3588c132a + pristine_git_object: 760882c6c5b442b09bbc91f910f960138d6a00c8 + docs/models/modeltype.md: + id: 9f69805691d1 + last_write_checksum: sha1:f3a8bce458460e55124ce5dd6814e7cada8e0e89 + pristine_git_object: a31c3ca0aa78cae9619b313f1cda95b9c391ee12 + docs/models/moderationobject.md: + id: 4e84364835f5 + last_write_checksum: sha1:2831033dcc3d93d32b8813498f6eb3082e2d3c4e + pristine_git_object: 320b2ab4935f8751eb58794e8eb9e422de35ae7c + docs/models/moderationresponse.md: + id: e15cf12e553b + last_write_checksum: sha1:18e8f4b4b97cb444824fcdce8f518c4e5a27c372 + pristine_git_object: 75a5eec74071fdd0d330c9f3e10dac0873077f20 + docs/models/name.md: + id: 6ee802922293 + last_write_checksum: sha1:91a266ed489c046a4ec511d4c03eb6e413c2ff02 + pristine_git_object: 18b978a8cc2c38d65c37e7dd110315cedb221620 + docs/models/object.md: + id: 7ffe67d0b83f + last_write_checksum: sha1:dfb590560db658dc5062e7cedc1f3f29c0d012a0 + pristine_git_object: 0122c0db4541d95d57d2edb3f18b9e1921dc3099 + docs/models/ocrimageobject.md: + id: b72f3c5853b2 + last_write_checksum: sha1:90c5158dec6a7b31c858677b6a8efa1e3cabd504 + pristine_git_object: 3c0d5544a80499b011467f29ef83d49f53801af6 + docs/models/ocrpagedimensions.md: + id: b3429f9883f5 
+ last_write_checksum: sha1:6435aa56e6153b0c90a546818ed780105ae1042a + pristine_git_object: c93ca64d5e20319ec6ec1bcb82b28c6ce0940f29 + docs/models/ocrpageobject.md: + id: 88a9e101b11e + last_write_checksum: sha1:091077fedf1b699d5160a21fe352056c247ef988 + pristine_git_object: 02473d44f73485fd7b7f0031d51bfac835d4036e + docs/models/ocrrequest.md: + id: 6862a3fc2d0f + last_write_checksum: sha1:f32fcc5916f9eedf7adfaa60beda30a9ec42f32e + pristine_git_object: 76e4da925937fd4bdd42307f116a74d4dbf2bea3 + docs/models/ocrresponse.md: + id: 30042328fb78 + last_write_checksum: sha1:8e4a4ae404ea752f3e9f1108c2a5f89ed6cfb143 + pristine_git_object: 0a309317644eedc643009b6cec3a7dbb142b1a15 + docs/models/ocrtableobject.md: + id: c967796380e6 + last_write_checksum: sha1:3b78858cc130fc8792ec3d149c8f657fd3f7a4c3 + pristine_git_object: 4e27697c15983f86274648b2d7bacac557081630 + docs/models/ocrusageinfo.md: + id: 419abbb8353a + last_write_checksum: sha1:6e717a3f3de3c464e8b3237f06867cdfecec339e + pristine_git_object: d9d79125cb02bc2b09d8dc543a5e2d4a6c55571c + docs/models/one.md: + id: 69a5df93c480 + last_write_checksum: sha1:cb6d46c2939a0e2314e29ff0307a2b0632caca65 + pristine_git_object: 3de496a6201d47ea52fc15bfe16a44bd6d3be900 + docs/models/outputcontentchunks.md: + id: f7e175c8e002 + last_write_checksum: sha1:5094466110028801726cc825e8809f524fe1ee24 + pristine_git_object: c76bc31d4d8791b7bef4dc6cbff6671b38a7927d + docs/models/outputs.md: + id: 58b672ddb5b3 + last_write_checksum: sha1:7553d62771ac5a85f8f330978b400cdd420cf865 + pristine_git_object: 7756c6276cc141b69d8099e0bbcbd2bccc1b5112 + docs/models/paginationinfo.md: + id: 3d2b61cbbf88 + last_write_checksum: sha1:1da38e172024fe703f3180ea3c6ec91fe3c51ed0 + pristine_git_object: ad1fbb86c714c152a5e6e99d8a741e7346884e55 + docs/models/prediction.md: + id: 3c70b2262201 + last_write_checksum: sha1:ca8a77219e6113f2358a5363e935288d90df0725 + pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 + docs/models/processingstatusout.md: + id: 83c8c59c1802 + last_write_checksum: sha1:046375bb3035cc033d4484099cd7f5a4f53ce88c + pristine_git_object: 7b67583f4209778ac6f945631c0ee03ba1f4c663 + docs/models/queryparamstatus.md: + id: 15628120923d + last_write_checksum: sha1:36f1c9b6a6af6f27fbf0190417abf95b4a0bc1b9 + pristine_git_object: dcd2090861b16f72b0fb321714b4143bc14b7566 + docs/models/referencechunk.md: + id: 07895f9debfd + last_write_checksum: sha1:97d01dd2b907e87b58bebd9c950e1bef29747c89 + pristine_git_object: a132ca2fe6fbbaca644491cbc36d88b0c67cc6bc + docs/models/referencechunktype.md: + id: 0944b80ea9c8 + last_write_checksum: sha1:956b270766c7f11fe99f4a9b484cc29c159e7471 + pristine_git_object: 1e0e2fe64883ef5f3e628777b261b1224661d257 + docs/models/repositories.md: + id: 0531efe9bced + last_write_checksum: sha1:249bdb315eb1f0bd54601e5b8a45e58cb1ec7638 + pristine_git_object: 02274e3d58d55f4a18dfdf578fa53d2459e1345e + docs/models/requestsource.md: + id: 8857ab6025c4 + last_write_checksum: sha1:4b7ecc7c5327c74e46e2b98bd6e3814935cdecdf + pristine_git_object: c81c115992439350d56c91d2e3351a13df40676b + docs/models/response1.md: + id: 245c499462a9 + last_write_checksum: sha1:6d64b50b59875744eb3c1038d7cdcba9397fdbae + pristine_git_object: 2e73fdbb204c14cadc028d0891ede0ca4d4178d7 + docs/models/responsebody.md: + id: a2c4400c632e + last_write_checksum: sha1:a1705a40914ac8f96000953bd53ca01f66643fcd + pristine_git_object: 8a218517178eed859683f87f143c5397f96d10d9 + docs/models/responsedoneevent.md: + id: 38c38c3c065b + last_write_checksum: 
sha1:9910c6c35ad7cb8e5ae0edabcdba8a8a498b3138 + pristine_git_object: ec25bd6d364b0b4959b11a6d1595bdb57cba6564 + docs/models/responsedoneeventtype.md: + id: 03a896b6f98a + last_write_checksum: sha1:09ccbc7ed0143a884481a5943221be2e4a16c123 + pristine_git_object: 58f7f44d74553f649bf1b54385926a5b5d6033f5 + docs/models/responseerrorevent.md: + id: 3e868aa9958d + last_write_checksum: sha1:9ed1d04b3ed1f468f4dc9218890aa24e0c84fc03 + pristine_git_object: 2ea6a2e0ec412ae484f60fa1d09d02e776499bb9 + docs/models/responseerroreventtype.md: + id: 5595b8eec59e + last_write_checksum: sha1:442185b0615ec81923f4c97478e758b451c52439 + pristine_git_object: 3b3fc303fc7f75c609b18a785f59517b222b6881 + docs/models/responseformat.md: + id: 50a1e4140614 + last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add + pristine_git_object: 5cab22f2bf1c412699f6a7ed18ef801ecbc3ee4b + docs/models/responseformats.md: + id: cf1f250b82db + last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 + pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f + docs/models/responsestartedevent.md: + id: 88e3b9f0aa8d + last_write_checksum: sha1:fa9db583e8223d2d8284866f7e6cf6d775751478 + pristine_git_object: 481bd5bba67a524dbadf9f1570a28ae20ec9f642 + docs/models/responsestartedeventtype.md: + id: 1d27fafe0f03 + last_write_checksum: sha1:c30ca125ec76af9a2191ebc125f5f8b9558b0ecb + pristine_git_object: 2d9273bd02bf371378575619443ec948beec8d66 + docs/models/retrievefileout.md: + id: 8e82ae08d9b5 + last_write_checksum: sha1:600d5ea4f75dab07fb1139112962affcf633a6c9 + pristine_git_object: 28f97dd25718833aaa42c361337e5e60488bcdc8 + docs/models/retrievemodelv1modelsmodelidgetrequest.md: + id: ac567924689c + last_write_checksum: sha1:7534c5ec5f1ae1e750c8f610f81f2106587e81a9 + pristine_git_object: f1280f8862e9d3212a5cfccd9453884b4055710a + docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md: + id: c2a914584353 + last_write_checksum: sha1:bdd52e2c434fc6fd10e341d41de9dda1a28ddb4f + pristine_git_object: 3ac96521a8f58f1ed4caedbb4ab7fe3fe2b238c5 + docs/models/role.md: + id: b694540a5b1e + last_write_checksum: sha1:260a50c56a8bd03cc535edf98ebec06437f87f8d + pristine_git_object: affca78d5574cc42d8e6169f21968e5a8765e053 + docs/models/sampletype.md: + id: 0e09775cd9d3 + last_write_checksum: sha1:33cef5c5b097ab7a9cd6232fe3f7bca65cd1187a + pristine_git_object: 34a6a012b1daeeb22626417650269e9376cc9170 + docs/models/security.md: + id: 452e4d4eb67a + last_write_checksum: sha1:45b7b8881a6560a468153662d61b99605a492edf + pristine_git_object: 2e0839d06f821dd97780dc22f202dedf23e4efe1 + docs/models/shareenum.md: + id: 53a713500576 + last_write_checksum: sha1:9d45d4bd272e6c146c3a8a21fd759acf2ae22148 + pristine_git_object: dc5d2b68a810c2983b5a47fbff747dfc2cc17598 + docs/models/sharingdelete.md: + id: 165cac179416 + last_write_checksum: sha1:1a0b3c95f4b56173510e234d7a76df85c593f360 + pristine_git_object: 1dcec0950c7fcd264ea9369c24244b54ba2bcfbf + docs/models/sharingin.md: + id: 08d396ee70ad + last_write_checksum: sha1:662edfc07a007e94fe1e54a07cf89d7c83c08df5 + pristine_git_object: bac18c8d43f801e8b5cf5b3cd089f9da0ee2281a + docs/models/sharingout.md: + id: 5db4547c7c56 + last_write_checksum: sha1:bd15c318d1a3f5bee7d7104d34cbd8ba6233bbb8 + pristine_git_object: 35aeff43593f3c9067c22a2f8b1468d7faa5af34 + docs/models/source.md: + id: 6541ef7b41e7 + last_write_checksum: sha1:d0015be42fe759d818ebd75b0cec9f83535a3b89 + pristine_git_object: bb1ed6124647b02c4350123bf257b0bf17fc38fd + docs/models/ssetypes.md: + id: 
6a902241137c + last_write_checksum: sha1:567027284c7572c0fa24132cd119e956386ff9d0 + pristine_git_object: ae06b5e870d31b10f17224c99af1628a7252bbc3 + docs/models/status.md: + id: 959cd204aadf + last_write_checksum: sha1:618f30fd5ba191bb918c953864bfac4a63192a40 + pristine_git_object: 5e22eb736c734121b4b057812cacb43b3e299b52 + docs/models/stop.md: + id: f231cc9f5041 + last_write_checksum: sha1:86903cac5f57ad9b8ac07ecba6c454d40a53bdc8 + pristine_git_object: ba40ca83136d6d6cb4f1ef9e5ca3104a704e4846 + docs/models/systemmessage.md: + id: fdb7963e1cdf + last_write_checksum: sha1:97e726dff19a39b468767d5c01fc6256277ee71f + pristine_git_object: 0dba71c00f40c85e74b2c1967e077ffff9660f13 + docs/models/systemmessagecontent.md: + id: 94a56febaeda + last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb + pristine_git_object: 0c87baf3c2fade64a2738a9a4b3ce19647e5dc9a + docs/models/systemmessagecontentchunks.md: + id: cea1c19e9d7a + last_write_checksum: sha1:986aec0f8098158515bbccd0c22e0b3d4151bb32 + pristine_git_object: 40030c170746d9953d25b979ab7e6f522018e230 + docs/models/tableformat.md: + id: d8cd08c55c3c + last_write_checksum: sha1:e0736ea9576466d71821aa1e67fc632cc5a85414 + pristine_git_object: 54f029b814fdcfa2e93e2b8b0594ef9e4eab792a + docs/models/textchunk.md: + id: 6cd12e0ef110 + last_write_checksum: sha1:f04818ca76e68b3d3684927e4032d5d7de882f6a + pristine_git_object: d488cb51abeb4913c8441d9fbe9e5b964099bb7e + docs/models/textchunktype.md: + id: 886e88ebde41 + last_write_checksum: sha1:ba8db2a3910d1c8af424930c01ecc44889335bd3 + pristine_git_object: e2a2ae8bcdf8a35ad580a7de6271a5d26cd19504 + docs/models/thinkchunk.md: + id: bca24d7153f6 + last_write_checksum: sha1:feb95a931bb9cdbfe28ab351618687e513cf830b + pristine_git_object: 66b2e0cde70e25e2927180d2e709503401fddeab + docs/models/thinkchunktype.md: + id: 0fbeed985341 + last_write_checksum: sha1:790f991f95c86c26a6abb9c9c5debda8b53526f5 + pristine_git_object: baf6f755252d027295be082b53ecf80555039414 + docs/models/thinking.md: + id: 07234f8dd364 + last_write_checksum: sha1:a5962d1615b57996730da19e59fbfaa684321442 + pristine_git_object: c7a0d5c9811ea37aaf9e16b6e93c833ab979573f + docs/models/timestampgranularity.md: + id: eb4d5a8e6f08 + last_write_checksum: sha1:c2134d9f1f96d4eef48cedfe2b93eb061d5ea47f + pristine_git_object: 0d2a8054590463a167f69c36c00b8f2fc3c7906d + docs/models/tool.md: + id: 8966139dbeed + last_write_checksum: sha1:1725bf53fc9f1ca3f332322d91de24c9d58adc6a + pristine_git_object: fb661f72887271d5bb470e4edf025a32b00ade17 + docs/models/toolcall.md: + id: 80892ea1a051 + last_write_checksum: sha1:cb27b9d36cfe6227978c7a7a01b1349b6bac99d9 + pristine_git_object: 3819236b9f3eee2f6878818cfbbe2817e97f7de2 + docs/models/toolchoice.md: + id: "097076343426" + last_write_checksum: sha1:25b33b34da02c3b46349dc8b6223f9ae18370d16 + pristine_git_object: 373046bbbc834169293b4f4ae8b2e238f952ddde + docs/models/toolchoiceenum.md: + id: 15410de51ffc + last_write_checksum: sha1:ca0cf9bf128bebc8faedd9333cc6a56b30f58130 + pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 + docs/models/toolexecutiondeltaevent.md: + id: f2fc876ef7c6 + last_write_checksum: sha1:901756826684886179c21f47c063c55700c79ec4 + pristine_git_object: 7bee6d831a92085a88c0772300bcad4ce8194edb + docs/models/toolexecutiondeltaeventname.md: + id: 93fd3a3b669d + last_write_checksum: sha1:d5dcdb165c220209ee76d81938f2d9808c77d4fc + pristine_git_object: 9c3edef8c0698d7293a71ee56410a0ed67fd1924 + docs/models/toolexecutiondeltaeventtype.md: + id: ae6e8a5bf0ce + last_write_checksum: 
sha1:dd405269077b6a4756fd086067c9bbe88f430924 + pristine_git_object: a4a2f8cc9927499c990bad0590e84b2a609add8d + docs/models/toolexecutiondoneevent.md: + id: b604a4ca5876 + last_write_checksum: sha1:267ff0e19884e08abf3818b890579c1a13a3fa98 + pristine_git_object: 5898ea5eff103b99886789805d9113dfd8b01588 + docs/models/toolexecutiondoneeventname.md: + id: d19dc0060655 + last_write_checksum: sha1:aa5677087e6933699135a53f664f5b86bbae5ac6 + pristine_git_object: 6449079d7b467796355e3353f4245046cced17e8 + docs/models/toolexecutiondoneeventtype.md: + id: 7c5a318d924b + last_write_checksum: sha1:55a5041cdf8c7e05fcfd7260a72f7cd3f1b2baf8 + pristine_git_object: 872624c1f274259cdd22100995b5d99bf27eaeac + docs/models/toolexecutionentry.md: + id: 75a7560ab96e + last_write_checksum: sha1:66086952d92940830a53f5583f1751b09d902fcf + pristine_git_object: 3678116df64ad398fef00bab39dd35c3fd5ee1f5 + docs/models/toolexecutionentryobject.md: + id: af106f91001f + last_write_checksum: sha1:6df075bee4e84edf9b57fcf62f27b22a4e7700f4 + pristine_git_object: 0ca79af56d60094099c8830f638a748a92a40f21 + docs/models/toolexecutionentrytype.md: + id: b61e79a59610 + last_write_checksum: sha1:b0485bae901e14117f76b8e16fe80023a0913787 + pristine_git_object: a67629b8bdefe59d188969a2b78fa409ffeedb2a + docs/models/toolexecutionstartedevent.md: + id: 37657383654d + last_write_checksum: sha1:3051a74c1746c8341d50a22f34bd54f6347ee0c8 + pristine_git_object: de81312bda08970cded88d1b3df23ebc1481ebf2 + docs/models/toolexecutionstartedeventname.md: + id: be6b33417678 + last_write_checksum: sha1:f8857baa02607b0a0da8d96d130f1cb765e3d364 + pristine_git_object: 3308c483bab521f7fa987a62ebd0ad9cec562c3a + docs/models/toolexecutionstartedeventtype.md: + id: 9eff7a0d9ad5 + last_write_checksum: sha1:86fe6aec11baff4090efd11d10e8b31772598349 + pristine_git_object: 56695d1f804c28808cf92715140959b60eb9a9fd + docs/models/toolfilechunk.md: + id: 67347e2bef90 + last_write_checksum: sha1:a0caa32f798601c7c66c31bfed6ac22ec41c2431 + pristine_git_object: f1b54c7c5fb38df3c886cc20b6b2136d73529f52 + docs/models/toolfilechunktype.md: + id: f895006e53e4 + last_write_checksum: sha1:258a55eef5646f4bf20a150ee0c48780bdddcd19 + pristine_git_object: 7e99acefff265f616b576a90a5f0484add92bffb + docs/models/toolmessage.md: + id: 0553747c37a1 + last_write_checksum: sha1:3ac87031fdd4ba8b0996e95be8e7ef1a7ff41167 + pristine_git_object: a54f49332c2873471759b477fb4c712fa4fb61f5 + docs/models/toolmessagecontent.md: + id: f0522d2d3c93 + last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee + pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 + docs/models/toolmessagerole.md: + id: f333d4d1ab56 + last_write_checksum: sha1:7e1c004bad24e928da0c286a9f053516b172d24f + pristine_git_object: c24e59c0c79ea886d266e38c673edd51531b9be6 + docs/models/toolreferencechunk.md: + id: 10414b39b7b3 + last_write_checksum: sha1:1169ef0b8a76aa094bc04b1cbdba1bafad758f9d + pristine_git_object: af447aee3824670bdff8a34891ca1ffc543116de + docs/models/toolreferencechunktype.md: + id: 42a4cae4fd96 + last_write_checksum: sha1:43620d9529a1ccb2fac975fbe2e6fcaa62b5baa5 + pristine_git_object: bc57d277a39eef3c112c08ffc31a91f5c075c5a4 + docs/models/tools.md: + id: b78ed2931856 + last_write_checksum: sha1:ea4dcd2eafe87fc271c2f6f22f9b1cedc9f8316e + pristine_git_object: f308d732e3adfcc711590c3e1bee627c94032a6b + docs/models/tooltypes.md: + id: adb50fe63ea2 + last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c + pristine_git_object: 84e49253c9b9bd1bd314e2a126106404cbb52f16 + 
docs/models/trainingfile.md: + id: 4039958e8930 + last_write_checksum: sha1:d02543c2d1446e56501f2ac358a09669b0077648 + pristine_git_object: cde218bb2281a1274d013844ad76b4b2a34b986c + docs/models/transcriptionresponse.md: + id: 39e2354aca38 + last_write_checksum: sha1:7b32e2179c3efc675c05bba322cc33554a9ff9db + pristine_git_object: 1bc0189c5d1833c946a71c9773346e21b08d2404 + docs/models/transcriptionsegmentchunk.md: + id: f09db8b2273e + last_write_checksum: sha1:c94ef1aa3dc2568ec77d186fa9061598f0ebccf1 + pristine_git_object: bebc9f72f521cf9cbd1818d53239cd632a025a31 + docs/models/transcriptionstreamdone.md: + id: 2253923d93cf + last_write_checksum: sha1:043ebcd284007f8c8536f2726ec5f525abffeb6b + pristine_git_object: 9ecf7d9ca32410d92c93c62ead9674e097533ec3 + docs/models/transcriptionstreamdonetype.md: + id: 3f5aec641135 + last_write_checksum: sha1:b86f7b20dff031e7dbe02b4805058a025c39dcac + pristine_git_object: db092c4fa47d7401919a02c199198e4ae99a5de1 + docs/models/transcriptionstreamevents.md: + id: d0f4eedfa2b6 + last_write_checksum: sha1:ec6b992049bd0337d57baab56603b1fa36a0a35b + pristine_git_object: f760385dfbd9779e63d61ec6357901bc9b4ca8e9 + docs/models/transcriptionstreameventsdata.md: + id: 506af75a0708 + last_write_checksum: sha1:99fcb3bf3aab0fb87dc02a4e6ccef9271ff0ae89 + pristine_git_object: eea8e9281634c56517e28f613afee38e0b0846ad + docs/models/transcriptionstreameventtypes.md: + id: 701782e8a63d + last_write_checksum: sha1:ff79dfb5d942c807b03c9e329a254bfa95b99a16 + pristine_git_object: e4eb25a6400dcc5a48b5eb5f65e96f7be91fa761 + docs/models/transcriptionstreamlanguage.md: + id: 5e9df200153c + last_write_checksum: sha1:82967c1b056bc1358adb21644bf78f0e37068e0f + pristine_git_object: e16c8fdce3f04ae688ddc18650b359d2dd5d6f6f + docs/models/transcriptionstreamlanguagetype.md: + id: 81c8bd31eeb1 + last_write_checksum: sha1:6cf3efec178180266bccda24f27328edfbebbd93 + pristine_git_object: e93521e10d43299676f44c8297608cc94c6106e6 + docs/models/transcriptionstreamsegmentdelta.md: + id: f59c3fb696f2 + last_write_checksum: sha1:d44b6c1359c0ed504f97edb46b3acf0145967fe7 + pristine_git_object: 3deeedf067c833cae8df1ab366a2e54b3f9e9186 + docs/models/transcriptionstreamsegmentdeltatype.md: + id: 03ee222a3afd + last_write_checksum: sha1:d02b5f92cf2d8182aeaa8dd3428b988ab4fc0fad + pristine_git_object: 03ff3e8bb4f25770200ed9fb43dd246375934c58 + docs/models/transcriptionstreamtextdelta.md: + id: 69a13554b554 + last_write_checksum: sha1:9f6c7bdc50484ff46b6715141cee9912f1f2f3ff + pristine_git_object: adddfe187546c0161260cf06953efb197bf25693 + docs/models/transcriptionstreamtextdeltatype.md: + id: ae14d97dc3fa + last_write_checksum: sha1:2abfea3b109518f7371ab78ade6fa514d6e3e968 + pristine_git_object: b7c9d675402cd122ee61deaa4ea7051c2503cf0e + docs/models/two.md: + id: 3720b8efc931 + last_write_checksum: sha1:8676158171bef1373b5e0b7c91a31c4dd6f9128a + pristine_git_object: 59dc2be2a2036cbdac26683e2afd83085387188f + docs/models/type.md: + id: 98c32f09b2c8 + last_write_checksum: sha1:9b07c46f7e1aacaab319e8dfdcfdfc94a2b7bf31 + pristine_git_object: d05ead75c8f6d38b4dbcc2cdad16f1ba4dd4f7e8 + docs/models/unarchiveftmodelout.md: + id: 4f2a771b328a + last_write_checksum: sha1:b3be8add91bbe10704ff674891f2e6377b34b539 + pristine_git_object: 287c9a007e0b2113738a1884450133558d23540e + docs/models/unarchiveftmodeloutobject.md: + id: 5fa9545c3df0 + last_write_checksum: sha1:29c0a228082142925a0fd72fef5a578f06ac764d + pristine_git_object: 623dcec24e2c676c9d50d3a3547b1dd9ffd78038 + docs/models/updateftmodelin.md: + id: 1b98d220f114 
+ last_write_checksum: sha1:d1c7a8f5b32228d8e93ad4455fccda51b802f08f + pristine_git_object: 4e55b1a7d96e1ad5c1e65c6f54484b24cd05fcfc + docs/models/uploadfileout.md: + id: c991d0bfc54c + last_write_checksum: sha1:ce5af8ffadb8443a6d1ca5fbbc014de42da35b9d + pristine_git_object: 6f09c9a6920f373c730fa3538b0c2953d757c257 + docs/models/usageinfo.md: + id: ec6fe65028a9 + last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc + pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 + docs/models/usermessage.md: + id: ed66d7a0f80b + last_write_checksum: sha1:8291f7703e49ed669775dc953ea8cab6715dc7ed + pristine_git_object: 63b0131091cd211b3b1477c1d63b5666a26db546 + docs/models/usermessagecontent.md: + id: 52c072c851e8 + last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a + pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf + docs/models/usermessagerole.md: + id: 99ffa937c462 + last_write_checksum: sha1:52014480516828b43827aa966b7319d9074f1111 + pristine_git_object: 171124e45988e784c56a6b92a0057ba00efc0db4 + docs/models/utils/retryconfig.md: + id: 4343ac43161c + last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d + pristine_git_object: 69dd549ec7f5f885101d08dd502e25748183aebf + docs/models/validationerror.md: + id: 304bdf06ef8b + last_write_checksum: sha1:1889f608099577e6a116c14b211a6811d6b22786 + pristine_git_object: 7a1654a1a5cfb3ab92360b361e8e962bf2db4582 + docs/models/wandbintegration.md: + id: ba1f7fe1b1a3 + last_write_checksum: sha1:1702d58db559818304404a5dc8c70d71fb2be716 + pristine_git_object: 199d2eddc61069c80b628a12bff359ac2abc7338 + docs/models/wandbintegrationout.md: + id: c1a0f85273d8 + last_write_checksum: sha1:c2addbba8c15b7c115129d5249c4a6d7dc527d2d + pristine_git_object: cec02ed87555128e6027e00f3385a61028694ac0 + docs/models/wandbintegrationouttype.md: + id: 647c7c2eab8a + last_write_checksum: sha1:78ad7847183b18319995b5e3de0262ba6fffecac + pristine_git_object: 5a7533c99671e0556c3c11f179312ec8268ce477 + docs/models/wandbintegrationtype.md: + id: 08c414c73826 + last_write_checksum: sha1:0990c604ec45f2f1fd1019e87705533b0c9be023 + pristine_git_object: 4fdffe22e370fd64429d83753c30a0079be0e7fd + docs/models/websearchpremiumtool.md: + id: 267988aa8c3f + last_write_checksum: sha1:cc040d754d40c644a2a8fd70302eb7ee864bfff3 + pristine_git_object: 941fc2b8448d4caeae9318fdf08053a2b59a9bee + docs/models/websearchpremiumtooltype.md: + id: c70fa6b0ee9f + last_write_checksum: sha1:069ad330c3f5b3c6b8a375de4484f151698c439c + pristine_git_object: 348bfe854914114c84cd74997a63fe2badc0756d + docs/models/websearchtool.md: + id: fc4df52fb9b5 + last_write_checksum: sha1:53e128c3f0f6781227d99d46838579dc15ab26d2 + pristine_git_object: c8d708bdcdbfc387a09683bdd47ebabedd566cb0 + docs/models/websearchtooltype.md: + id: 6591e569c4f3 + last_write_checksum: sha1:f9b6672bc3fbb5bb70c4919cb7b98160a0ebe9ff + pristine_git_object: 57b6acbbd3b85aae5a9b7e2f754689637c01a912 + docs/sdks/accesses/README.md: + id: 2ea167c2eff2 + last_write_checksum: sha1:ac4ec473f9991ea2ca3e66838f8f791a54d881e3 + pristine_git_object: 040bc24c6acb9153296e105009ac4ef251cc2dd4 + docs/sdks/agents/README.md: + id: 5965d8232fd8 + last_write_checksum: sha1:f368d2c40ad72aa9e8de04809bd300e935dbb63b + pristine_git_object: 173925eead663741af81d5f624c2964278bde979 + docs/sdks/chat/README.md: + id: 393193527c2c + last_write_checksum: sha1:931ab91704f496b220c7da1aa985cea14d969784 + pristine_git_object: 5bb24baa3444d72faace5473d0a775a0e5ad403e + docs/sdks/classifiers/README.md: + id: 74eb09b8d620 + 
last_write_checksum: sha1:5694a04fcd208d52a0e4cdab9b7e4c4ff5200e52 + pristine_git_object: 57a3f805dd6b12d086a4787bae43349c2f36b88c + docs/sdks/conversations/README.md: + id: e22a9d2c5424 + last_write_checksum: sha1:b4e49eadaf5a3bb50f5c3a88a759bc529db2584f + pristine_git_object: c488848cc4c18a098deae8f02c0d4a86d1d898db + docs/sdks/documents/README.md: + id: 9758e88a0a9d + last_write_checksum: sha1:84791e86c3b9c15f8fd16d2a3df6bd3685023a69 + pristine_git_object: d3f5a9757c2327dab8e5b1962542b37c5e2551af + docs/sdks/embeddings/README.md: + id: 15b5b04486c1 + last_write_checksum: sha1:797d738d9b0b4e9d1db385b73625df2cff728e5d + pristine_git_object: 6a2768a2f8ab43eec86b28a354f555257f7f569d + docs/sdks/files/README.md: + id: e576d7a117f0 + last_write_checksum: sha1:88cd213e513854b8beee72b8ea751f74bf32a845 + pristine_git_object: f0dfd59364c06e84d9cce517594a2912e2b724c8 + docs/sdks/fim/README.md: + id: 499b227bf6ca + last_write_checksum: sha1:824f7d1b58ff0b650367737c0e9b91a9d2d14a45 + pristine_git_object: db6f2e1b65866e1309d94e852fa0a1e82d2606fd + docs/sdks/jobs/README.md: + id: 7371cdc8b89a + last_write_checksum: sha1:5117aebda0558e7b82150f0b91480e3362687a89 + pristine_git_object: 666224a728cc433bca9520437d36a2b526ac2df6 + docs/sdks/libraries/README.md: + id: df9a982905a3 + last_write_checksum: sha1:8769d4b43f93c744fca43c34a7d7e9d99122c886 + pristine_git_object: e672c190ad6ac4623f99357d7e59d52f6722518f + docs/sdks/mistralagents/README.md: + id: 20b3478ad16d + last_write_checksum: sha1:b13e50de2ff10eabb4534f561c8cac185485280b + pristine_git_object: 97819467c39bc4f813093e55756e38ba06263a87 + docs/sdks/mistraljobs/README.md: + id: 71aafa44d228 + last_write_checksum: sha1:643cb6d75091b5c7e1ac87a925c9235f92cbe2f7 + pristine_git_object: cc23c1b9eaf791924dfdc48481ada30a11fa5761 + docs/sdks/models/README.md: + id: b35bdf4bc7ed + last_write_checksum: sha1:8e256360d014fc3384256a9f155c6382f8e16a6d + pristine_git_object: d51866b6cff74932bf86c266f75773c2d3e74fd0 + docs/sdks/ocr/README.md: + id: 545e35d2613e + last_write_checksum: sha1:fc478d79405c775e9ae65334122d4539be952492 + pristine_git_object: 6fd904cc045b8accf5cc11436fd66f4024c9897f + docs/sdks/transcriptions/README.md: + id: 089cf94ecf47 + last_write_checksum: sha1:fdf785e4cbab20aec41122735435a38f582f7f29 + pristine_git_object: 3243258c4debd94e10c98c2b18dcc47838143a5b + py.typed: + id: 258c3ed47ae4 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + scripts/publish.sh: + id: fe273b08f514 + last_write_checksum: sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3 + pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 + src/mistralai/__init__.py: + id: 7aaa1403a9fc + last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b + pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c + src/mistralai/_hooks/__init__.py: + id: 89bd3648c8ca + last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d + pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 + src/mistralai/_hooks/sdkhooks.py: + id: a085b78b3f45 + last_write_checksum: sha1:1d9666df503110a00569c2a79886ac3be49a3ffb + pristine_git_object: 1f9a9316c430821226ada4db2b37f87083f1c326 + src/mistralai/_hooks/types.py: + id: 066b285c9341 + last_write_checksum: sha1:16bf3c53068c38ba0f838172787178c883551283 + pristine_git_object: 6d0f3e1166cb0271f89f5ba83441c88199d7a432 + src/mistralai/_version.py: + id: 37b53ba66d7f + last_write_checksum: sha1:513473311e071318f796343544395e606acad0f0 + 
pristine_git_object: 062842097c149487202ce7d6082bde52245416db + src/mistralai/accesses.py: + id: 98cb4addd052 + last_write_checksum: sha1:5b2b08565036dbfc3cdf55552dcc4059315a1106 + pristine_git_object: ac166838c4288808a7ddda6dfc2de8ae3ce9c04e + src/mistralai/agents.py: + id: aa07ea92bffb + last_write_checksum: sha1:f3cd21ffae99acbdeaf2d9feebddc4ba5697b91c + pristine_git_object: 656f7b4c476c7a309d52eb8f051ea24b38be6f6e + src/mistralai/audio.py: + id: c398f6a11e24 + last_write_checksum: sha1:11f9713b4f970509cffe0e6122c61f9aeafc9e73 + pristine_git_object: 5687abdb5676903661a33a3bee115f289f5fe9df + src/mistralai/basesdk.py: + id: 3127264590ce + last_write_checksum: sha1:5340f1c5976fd87d3b17b285535b63bbbe7db120 + pristine_git_object: c9a32aa13eae485d0159632dadbfbb2452978709 + src/mistralai/batch.py: + id: 60df0c5efce3 + last_write_checksum: sha1:9d463fd6ac747635ab2b0e61c918a098aae5a370 + pristine_git_object: 7ed7ccefdaab2368dc7bb9fa8c718a05dcec3ca6 + src/mistralai/beta.py: + id: 7d1c8d453249 + last_write_checksum: sha1:780b45086f215d1f04983d1ea6c89acc16475cfc + pristine_git_object: 4bbf1fa36053c6754026285f3a149911b653d735 + src/mistralai/chat.py: + id: cb76f81a1426 + last_write_checksum: sha1:428e9d21a379250e2b955587237db7a81842e403 + pristine_git_object: fa4c4ed845400727f6b84f1fc526ffdd54ccb003 + src/mistralai/classifiers.py: + id: a8f7d4c1c787 + last_write_checksum: sha1:8694219d3619d24f60f1425d4bc5f7d3b7491ec8 + pristine_git_object: d1a3c5e575f4d1d63019f0a0e333f96d46c123bc + src/mistralai/conversations.py: + id: be58e57a6198 + last_write_checksum: sha1:ea5a71c7e19bffc6eb3624bcef6215d8be636000 + pristine_git_object: a376c279ae28d92408c647f49213321088cf9535 + src/mistralai/documents.py: + id: 1945602083a8 + last_write_checksum: sha1:11d4f8e3b34a2f01a3d7ca4198ee95ec3e13ed4a + pristine_git_object: 50a177f70d9c528ff0e63e4fa8f99c158ac16f08 + src/mistralai/embeddings.py: + id: 2bbb9b5427d7 + last_write_checksum: sha1:6b4ae1a0d072aa2985791ba818a8f4e9ce41f51b + pristine_git_object: cf2503551d891669abac5ea66fd98c3f3de9f1a7 + src/mistralai/files.py: + id: 0e29db0e2269 + last_write_checksum: sha1:fc15b2165213e069eba23130c1adf0491db2c46e + pristine_git_object: 4189470a653f55e7ebe722fcf32fc8d57db282a8 + src/mistralai/fim.py: + id: 71a865142baf + last_write_checksum: sha1:a8094fc43b585c7e517d3035bece46db8d1faef2 + pristine_git_object: a341149dcc39a19ada0c17f5bf154df7ba6ae5e1 + src/mistralai/fine_tuning.py: + id: 12578f7d13a6 + last_write_checksum: sha1:e48227f7ea5b51d837e7619f59582e663eb94ed1 + pristine_git_object: 8ed5788a58ab2e9d1125b30624c734a602084294 + src/mistralai/httpclient.py: + id: dcfb0dd6b386 + last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 + pristine_git_object: 89560b566073785535643e694c112bedbd3db13d + src/mistralai/jobs.py: + id: 6869267a98bf + last_write_checksum: sha1:18a0f4d704550bf38c1b1d7f5f79f5e835f139dc + pristine_git_object: 6d81920c39f6819fd9b904ae45bff2cdf0f814a5 + src/mistralai/libraries.py: + id: e5b244f28b27 + last_write_checksum: sha1:7084d7b61238494f834fe20dcf387810e77f3eb0 + pristine_git_object: 32648937feb79adf6155423cbe9bac4d7fe52224 + src/mistralai/mistral_agents.py: + id: 671c4985aaa1 + last_write_checksum: sha1:4532a74409cebeb1723e4dcfc1300edcbfd0a000 + pristine_git_object: 235f6a5d7c287311da1a997e51fbb3392ecf1175 + src/mistralai/mistral_jobs.py: + id: 18065a449da0 + last_write_checksum: sha1:c5a6380e45542914aede32a9f139f51fe71f6a2f + pristine_git_object: e59662eaef33d9e4ad650142f37948264da9dec3 + src/mistralai/models/__init__.py: + id: 3228134f03e5 
+      last_write_checksum: sha1:382d9dc6e44710b1c57c96ec0a2aedcb40e389d7
+      pristine_git_object: 7895aeaa960626d7736f84bb6e933a28013c2e3c
+    src/mistralai/models/agent.py:
+      id: ca4162a131b1
+      last_write_checksum: sha1:fe8a7c8c9c4ba59613d7d89f0c2e7a6958e25f85
+      pristine_git_object: eb30905b3de2b69ece35bdd40f390b2fa6ffc5a8
+    src/mistralai/models/agentconversation.py:
+      id: bd3035451c40
+      last_write_checksum: sha1:2e4a6a5ae0da2e9ccbb588c8487b48077d561d93
+      pristine_git_object: 625fb4fc6697860060dfdeb449986d89efc232d6
+    src/mistralai/models/agentcreationrequest.py:
+      id: 87f33bd9ea58
+      last_write_checksum: sha1:a6885376d36a5a17273d8d8d8d45e3d6c3ee1b9f
+      pristine_git_object: 6a14201eca82f26871ab4f87e547a5e9bcf3b933
+    src/mistralai/models/agenthandoffdoneevent.py:
+      id: 496685a9343b
+      last_write_checksum: sha1:f03d37569960b56155e977aa68fbbaad8e25f687
+      pristine_git_object: 1cdbf45652ff70d045c650734ab6bdc0eca97734
+    src/mistralai/models/agenthandoffentry.py:
+      id: 836045caeb8f
+      last_write_checksum: sha1:e5c6b73014cd6859a47cb5958cdfa7b105e3aa3e
+      pristine_git_object: 66136256215caf7c1f174deec70ab9fbfff634fc
+    src/mistralai/models/agenthandoffstartedevent.py:
+      id: ce8e306fa522
+      last_write_checksum: sha1:2b5bac2f628c0e7cdd6df73404f69f5d405e576c
+      pristine_git_object: 11bfa918903f8de96f98f722eaaf9a70b4fca8c1
+    src/mistralai/models/agents_api_v1_agents_deleteop.py:
+      id: 588791d168a1
+      last_write_checksum: sha1:2dae37c3b9778d688663550b9803d52111577f3e
+      pristine_git_object: 38e04953cc320f503a2f6e77096985da60896f2a
+    src/mistralai/models/agents_api_v1_agents_getop.py:
+      id: 2358eceee519
+      last_write_checksum: sha1:362d0c781b2c79d829f6e4901e558aaca937b105
+      pristine_git_object: dced6dbb49c31fe2981cbd3865c0d580082a1ade
+    src/mistralai/models/agents_api_v1_agents_listop.py:
+      id: 15579851e4fe
+      last_write_checksum: sha1:95eada5f97520a680f2516c36b91c0830920fdc1
+      pristine_git_object: 69a157a60be52f3fb7c2f20d6ccd00b9dd7bace2
+    src/mistralai/models/agents_api_v1_agents_update_versionop.py:
+      id: 262e7a2f05e3
+      last_write_checksum: sha1:faa5550d08ddbb8223e8e6f2fcea6f09408bd228
+      pristine_git_object: 5e4b97b3b175a8485fd04adc5b92a4870a46bda9
+    src/mistralai/models/agents_api_v1_agents_updateop.py:
+      id: 72f9d6466691
+      last_write_checksum: sha1:9c99959045d9d182a9814954dcd769b294267165
+      pristine_git_object: 32696fbe60f17067520bf574bac8144abeb7af3f
+    src/mistralai/models/agents_api_v1_conversations_append_streamop.py:
+      id: 89a020d8fdfd
+      last_write_checksum: sha1:ec2fbbc5017a2374ab3f75a33592399b83fcc5f6
+      pristine_git_object: d2489ffb2e01dc6a4e93aee931723be55261ca6c
+    src/mistralai/models/agents_api_v1_conversations_appendop.py:
+      id: fd73b0582d26
+      last_write_checksum: sha1:22f62e8277ae5845e2b3c41d81d962edc3592090
+      pristine_git_object: ba37697ea506fe08ecee5ed7585a1deee56a0827
+    src/mistralai/models/agents_api_v1_conversations_deleteop.py:
+      id: ecd0a5c14be5
+      last_write_checksum: sha1:bd894dcef52e02541fa09ae0d51755dad946e3c2
+      pristine_git_object: 94126cae1a7a4cd09037d8224cd79f63935a2636
+    src/mistralai/models/agents_api_v1_conversations_getop.py:
+      id: 600a28e887fe
+      last_write_checksum: sha1:b2dbccf934677ed646bb9ad6e947787bb6c4235b
+      pristine_git_object: a37a61babd146035d51095143f8781c0d94be0c3
+    src/mistralai/models/agents_api_v1_conversations_historyop.py:
+      id: 5e3db049c234
+      last_write_checksum: sha1:fde97f139a93c4723abc4f08ebcf20afcdf67d54
+      pristine_git_object: b8c33d1b1b18b0a0c6b263962efc1d84d066021a
+    src/mistralai/models/agents_api_v1_conversations_listop.py:
+      id: 3cf4a3751a1c
+      last_write_checksum: sha1:5517d940093925326fed36a7aa5fbe48952c4406
+      pristine_git_object: e1c8489bb0a1285f38f389307b9e150190ba0459
+    src/mistralai/models/agents_api_v1_conversations_messagesop.py:
+      id: c7eb683e873e
+      last_write_checksum: sha1:d96c4e78c4ce75b668bc23aec91be399a0d26541
+      pristine_git_object: f0dac8bf6a58882b55c88b12e039357c5ff7dfe4
+    src/mistralai/models/agents_api_v1_conversations_restart_streamop.py:
+      id: c9d4d80d68d5
+      last_write_checksum: sha1:8a96d0ccbe2918a13e022f629ea62120e9ed5c0d
+      pristine_git_object: f39b74eb6358938de7fddf7d1fd92eb4fb011f6b
+    src/mistralai/models/agents_api_v1_conversations_restartop.py:
+      id: 9dadcde20152
+      last_write_checksum: sha1:44a127399dfcbc7c07af3c686469bcbb6e798b40
+      pristine_git_object: f706c066d1de93cf03c9a7829fc3ea79eddfc8ad
+    src/mistralai/models/agentscompletionrequest.py:
+      id: 843813a24928
+      last_write_checksum: sha1:448a82b0a0bb608700eadc185213dc11f5a3540e
+      pristine_git_object: cff4df64b11fa7a96316453b61bbe7928e3168dc
+    src/mistralai/models/agentscompletionstreamrequest.py:
+      id: 6be8367d3443
+      last_write_checksum: sha1:d15d972795a9b2a2d65bc85a4bd86dfacefd7d80
+      pristine_git_object: 69edc23c8cf69a30acad980695bab564b88a0e8f
+    src/mistralai/models/agentupdaterequest.py:
+      id: 24e7a9fdb507
+      last_write_checksum: sha1:a5bb4a17ff80a3471321d38faa1e6605ebe541a4
+      pristine_git_object: e496907c084f0a6cf90de6ebbf508d3137699bf0
+    src/mistralai/models/apiendpoint.py:
+      id: b26effd643dc
+      last_write_checksum: sha1:07ba583784d9099e6a24e94805a405112e2fcb41
+      pristine_git_object: 0ad9366f0efbcf989f63fa66750dce2ecc5bb56a
+    src/mistralai/models/archiveftmodelout.py:
+      id: 48fc1069be95
+      last_write_checksum: sha1:c3c6b5ae470f23805201cd5565fca095bc9b7a74
+      pristine_git_object: 0f753cfc948282f4ee5004fe463c091ed99e83a7
+    src/mistralai/models/assistantmessage.py:
+      id: e73f1d43e4ad
+      last_write_checksum: sha1:b5d1d0a77b9a4e2f7272ff9fe7e319c2bc1bdb25
+      pristine_git_object: a38a10c4968634d64f4bdb58d74f4955b29a92a8
+    src/mistralai/models/audiochunk.py:
+      id: ad7cf79b2cca
+      last_write_checksum: sha1:c13008582708d368c3dee398cc4226f747b5a9d0
+      pristine_git_object: 64fc43ff4c4ebb99b7a6c7aa3090b13ba4a2bdbc
+    src/mistralai/models/audiotranscriptionrequest.py:
+      id: 4c6a6fee484a
+      last_write_checksum: sha1:d8fb192581056b4ae053f9e6919874850462cb03
+      pristine_git_object: 308e2599f4ba8878b0fc20ee2660289b55ae7c9a
+    src/mistralai/models/audiotranscriptionrequeststream.py:
+      id: 863eca721e72
+      last_write_checksum: sha1:a7ec74e5e05a705f2d61d1fe8a635178bcea3cd6
+      pristine_git_object: 04374503f931f3964851d09def70535276bdf194
+    src/mistralai/models/basemodelcard.py:
+      id: 5554644ee6f2
+      last_write_checksum: sha1:aa5af32cda04d45bcdf2c8fb380529c4fbc828aa
+      pristine_git_object: 706841b7fc71051890201445050b5383c4b0e998
+    src/mistralai/models/batcherror.py:
+      id: 657a766ed6c7
+      last_write_checksum: sha1:5d727f59bbc23e36747af5e95ce20fcbf4ab3f7c
+      pristine_git_object: 4f8234465c57779d026fe65e131ba4cbe2746d40
+    src/mistralai/models/batchjobin.py:
+      id: 7229d3fdd93b
+      last_write_checksum: sha1:6bbc902220372b8b016008dbf9990a4cea81e193
+      pristine_git_object: 475ba8630d2221204a6b95fab528133498e1a7dd
+    src/mistralai/models/batchjobout.py:
+      id: 420d2a600dfe
+      last_write_checksum: sha1:d6debbf0316cd8c23189212837f1043ea6119629
+      pristine_git_object: 3d9f0dba5d56ac1df84fd9562e10c3f4580a9b3e
+    src/mistralai/models/batchjobsout.py:
+      id: 7bd4a7b41c82
+      last_write_checksum: sha1:838e36e981a3dedb54663a32d8657d2a6ffaa364
+      pristine_git_object: a1eba5db0ab8d8308b9e933352b55e32b80f33c7
+    src/mistralai/models/batchjobstatus.py:
+      id: ee3393d6b301
+      last_write_checksum: sha1:9e042ccd0901fe4fc08fcc8abe5a3f3e1ffe9cbb
+      pristine_git_object: 4b28059ba71b394d91f32dba3ba538a73c9af7a5
+    src/mistralai/models/builtinconnectors.py:
+      id: 611d5b9f6fa4
+      last_write_checksum: sha1:4e94744e3854d4cdc9d1272e4f1d9371f9829a5f
+      pristine_git_object: 6a3b2476d54096722eb3e7a271629d108028bd35
+    src/mistralai/models/chatclassificationrequest.py:
+      id: 7fee7b849791
+      last_write_checksum: sha1:22d8e106c165c9a16f220dc242b9165e5dcd6963
+      pristine_git_object: f06f4f34d264d5bd049ced125d8675434c4fab96
+    src/mistralai/models/chatcompletionchoice.py:
+      id: 362cbbc2f932
+      last_write_checksum: sha1:42957985d185554675663f6267714232b2b0a01b
+      pristine_git_object: b26ce1ec8d7bb4bfb98cc50a09d62aa66934a948
+    src/mistralai/models/chatcompletionrequest.py:
+      id: ed77c35d0007
+      last_write_checksum: sha1:07468039abacbcbcbed24d8e0ba7f7ce53ba6d3e
+      pristine_git_object: a309421b78de248042ff18108a6484722c7375e0
+    src/mistralai/models/chatcompletionresponse.py:
+      id: 227c368abb96
+      last_write_checksum: sha1:1f8d263cc3388507fcec7a0e2419d755433a1e3e
+      pristine_git_object: 3d03b1265f4c41b6e11d10edcff0e4f9fea1e434
+    src/mistralai/models/chatcompletionstreamrequest.py:
+      id: d01414c359f7
+      last_write_checksum: sha1:3503d0dcdd9b95af6e5121e1d2ad33b4f4f8f772
+      pristine_git_object: 7a28cf01899dcd1204f408893b9874e9f8ce4a28
+    src/mistralai/models/chatmoderationrequest.py:
+      id: 9146b8de3702
+      last_write_checksum: sha1:c0465d837b1517e061036f69faa0f40464873ff6
+      pristine_git_object: 2f58d52fd00e2a1003445a1e524e3856dd8ad4c7
+    src/mistralai/models/checkpointout.py:
+      id: ee97be8b74d3
+      last_write_checksum: sha1:55cd36289696fa4da06a06812a62859bac83479f
+      pristine_git_object: aefb7731d0dfc71db4647509ef4e0ad1d70a3a95
+    src/mistralai/models/classificationrequest.py:
+      id: fbb8aaa182b6
+      last_write_checksum: sha1:6640f0f0ae855ad28d247db5d34b7f888c3b51df
+      pristine_git_object: 39e25390c282448fafad872523405e14d94aab85
+    src/mistralai/models/classificationresponse.py:
+      id: b73b192344cb
+      last_write_checksum: sha1:0fa30f6b7eba3cbf1951bd45724d99b1ff023bb1
+      pristine_git_object: b7741f373f062d552a67550dcd30e0592805ce93
+    src/mistralai/models/classificationtargetresult.py:
+      id: 718124fab7ab
+      last_write_checksum: sha1:de004f490ec6da5bee26590697a97c68d7db9168
+      pristine_git_object: 60c5a51b0a5e3f2b248f1df04ba12ec5075556eb
+    src/mistralai/models/classifierdetailedjobout.py:
+      id: aebdcce0d168
+      last_write_checksum: sha1:5d16ca3b3c375a899ee25fc9ce74d877d71b7be1
+      pristine_git_object: 701aee6e638ee8ca3e43500abce790a6f76df0c7
+    src/mistralai/models/classifierftmodelout.py:
+      id: 12437ddfc64e
+      last_write_checksum: sha1:2436c401d49eb7fa0440fca6f09045f20bb52da1
+      pristine_git_object: d2a31fae8c534b1008b96c8d4f1e22d69b85c6f3
+    src/mistralai/models/classifierjobout.py:
+      id: aa6ee49244f8
+      last_write_checksum: sha1:0c2fe0e01ccfa25686565bc836d3745313f61498
+      pristine_git_object: a2f7cc08b35152a1b56bbfbaa49f9231df651719
+    src/mistralai/models/classifiertargetin.py:
+      id: 0439c322ce64
+      last_write_checksum: sha1:92b7928166f1a0ed8a52c6ccd7523119690d9a35
+      pristine_git_object: d8a060e4896cbe9ccf27be91a44a84a3a84589f7
+    src/mistralai/models/classifiertargetout.py:
+      id: 1c9447805aaa
+      last_write_checksum: sha1:bf961d9be0bd5239032a612eb822ad8adcee6d99
+      pristine_git_object: ddc587f46a3bc78df5d88793c768431429ccf409
+    src/mistralai/models/classifiertrainingparameters.py:
+      id: 8d7d510cb1a1
+      last_write_checksum: sha1:72c19293d514c684e1bd4a432b34382f4d674e26
+      pristine_git_object: 718beeac3aa1fc2b8af52d61510f34414bcab990
+    src/mistralai/models/classifiertrainingparametersin.py:
+      id: 3da8da32eac4
+      last_write_checksum: sha1:ae5088ac22014504b3d3494db46869b87716342b
+      pristine_git_object: 9868843fbb81cc45657980b36c3c9409d386114d
+    src/mistralai/models/codeinterpretertool.py:
+      id: 8c90fc7cca85
+      last_write_checksum: sha1:d0e3832422493176bcb29b4edec0aa40c34faa12
+      pristine_git_object: 48b74ee85c897179f6f2855d6737e34031b6c0f8
+    src/mistralai/models/completionargs.py:
+      id: 6673897ce695
+      last_write_checksum: sha1:a6b22e1abc324b8adceb65cbf990c0a0ab34b603
+      pristine_git_object: 40aa0314895b5b2e9b598d05f9987d39518a6c60
+    src/mistralai/models/completionargsstop.py:
+      id: d3cf548dde2f
+      last_write_checksum: sha1:99912f7a10e92419308cf3c112c36f023de3fc11
+      pristine_git_object: de7a09564540daaa6819f06195c347b3e01914f7
+    src/mistralai/models/completionchunk.py:
+      id: d3dba36f2e47
+      last_write_checksum: sha1:e93199f69c09b0f7c5c169c90c990a7e7439b64a
+      pristine_git_object: 4d1fcfbf2e46382cc1b8bbe760efa66ceb4207b3
+    src/mistralai/models/completiondetailedjobout.py:
+      id: 7e46c1d1597b
+      last_write_checksum: sha1:4ef7f96a2ac505891fec22e4fe491ea21da67e0b
+      pristine_git_object: df41bc2ab5bf484d755d31fa132158bd1dc5b489
+    src/mistralai/models/completionevent.py:
+      id: 7d9b2ff555f0
+      last_write_checksum: sha1:268f8b79bf33e0113d1146577827fe10e47d3078
+      pristine_git_object: cc8599103944b8eebead6b315098a823e4d086e3
+    src/mistralai/models/completionftmodelout.py:
+      id: 20e6aae7163d
+      last_write_checksum: sha1:8272d246489fe8d3743d28b37b49b660ca832ea1
+      pristine_git_object: 7b6520de657363e984eef8efd870b4b841dc52e0
+    src/mistralai/models/completionjobout.py:
+      id: 36ce54765988
+      last_write_checksum: sha1:c167fae08705eccd65ec30e99046276bdcdd1b97
+      pristine_git_object: 70995d2a8e45ac5bf9a4b870d7b745e07f09856f
+    src/mistralai/models/completionresponsestreamchoice.py:
+      id: a5323819cf5b
+      last_write_checksum: sha1:36f1a3b47ab8de274d9d0adaa98dca7bfbc4a773
+      pristine_git_object: c965751569df0f6996e8b5eccfe10bb056a7267c
+    src/mistralai/models/completiontrainingparameters.py:
+      id: 701db02d1d12
+      last_write_checksum: sha1:bb6d3ca605c585e6281d85363e374923ed6ddd33
+      pristine_git_object: 0200e81c35f05863eee7753e530d9c2290c56404
+    src/mistralai/models/completiontrainingparametersin.py:
+      id: 0858706b6fc7
+      last_write_checksum: sha1:0c8735e28dc6c27bf759a6bd93e8f1cf0919b382
+      pristine_git_object: 1f74bb9da85bd721c8f11521b916ae986cd473eb
+    src/mistralai/models/contentchunk.py:
+      id: f753f1e60f3b
+      last_write_checksum: sha1:af68b3ca874420a034d7e116a67974da125d5a30
+      pristine_git_object: 47170eefb0ed04399548d254896fa616b24ec258
+    src/mistralai/models/conversationappendrequest.py:
+      id: ddbd85dab2db
+      last_write_checksum: sha1:c8ca45ad5b8340531a469e9847ee64f80c8db4c3
+      pristine_git_object: 15cbc687396ee59eee742d65e490c354fdbf0688
+    src/mistralai/models/conversationappendstreamrequest.py:
+      id: 7d9c85747963
+      last_write_checksum: sha1:ada1cbcad5ce2dd6a6bc268b30f78dc69901ff6c
+      pristine_git_object: 8cecf89d3342be9a94066716863f4fa121b29012
+    src/mistralai/models/conversationevents.py:
+      id: f543ca03cde2
+      last_write_checksum: sha1:7e6ac7ea6f4e216071af7460133b6c7791f9ce65
+      pristine_git_object: ba4c628c9de7fb85b1dcd5a47282f97df62a3730
+    src/mistralai/models/conversationhistory.py:
+      id: ab4d51ae0094
+      last_write_checksum: sha1:1d85aa48d019ce003e2d151477e0c5925bd619e7
+      pristine_git_object: d5206a571e865e80981ebfcc99e65859b0dc1ad1
+    src/mistralai/models/conversationinputs.py:
+      id: 50986036d205
+      last_write_checksum: sha1:3e8c4650808b8059c3a0e9b1db60136ba35942df
+      pristine_git_object: 4d30cd76d14358e12c3d30c22e3c95078ecde4bd
+    src/mistralai/models/conversationmessages.py:
+      id: be3ced2d07e7
+      last_write_checksum: sha1:410317f1b45f395faa66a9becd7bb2398511ba60
+      pristine_git_object: 32ca9c20cb37ff65f7e9b126650a78a4b97e4b56
+    src/mistralai/models/conversationrequest.py:
+      id: ceffcc288c2d
+      last_write_checksum: sha1:32e7b41c01d2d7accccb1f79248b9e1c56c816f3
+      pristine_git_object: 09d934ed3db66ecbd5ab8e3406c3ffb8a1c3c606
+    src/mistralai/models/conversationresponse.py:
+      id: 016ec02abd32
+      last_write_checksum: sha1:37c3f143b83939b369fe8637932974d163da3c37
+      pristine_git_object: ff318e35ee63e43c64e504301236327374442a16
+    src/mistralai/models/conversationrestartrequest.py:
+      id: 2a8207f159f5
+      last_write_checksum: sha1:8f53b5faba0b19d8fdf22388c72eb2580ee121f6
+      pristine_git_object: a9c8410c7b1010780bf1d98b1580453aeef07509
+    src/mistralai/models/conversationrestartstreamrequest.py:
+      id: d98d3e0c8eed
+      last_write_checksum: sha1:cba039d9276869be283d83218659f4bf7537b958
+      pristine_git_object: 0703bb5fe6566ff15677e5f604537ab9ae2b79bd
+    src/mistralai/models/conversationstreamrequest.py:
+      id: f7051f125d44
+      last_write_checksum: sha1:7ce5ab24500754f4c4f36fd07934fe992d7bbb2e
+      pristine_git_object: 6ff56e1786e7342284bac0fb4b669806cee55c0f
+    src/mistralai/models/conversationusageinfo.py:
+      id: 922894aa994b
+      last_write_checksum: sha1:0e0039421d7291ecbbf820ea843031c50371dd9e
+      pristine_git_object: 9ae6f4fb6a7b4fd056c677c2152625de422b490a
+    src/mistralai/models/delete_model_v1_models_model_id_deleteop.py:
+      id: 409899d6ca23
+      last_write_checksum: sha1:2d1e5b8947b56abba06363358973032e196c8139
+      pristine_git_object: 4acb8d5373f25d7200378d0b8a767451978aa5a9
+    src/mistralai/models/deletefileout.py:
+      id: d51d0de32738
+      last_write_checksum: sha1:da9e95bb804820dea4977f65f62c08e491d9bb4b
+      pristine_git_object: 2b346ec4879c8811f824c7e6bde9fef922f37382
+    src/mistralai/models/deletemodelout.py:
+      id: 8dcf3427f17b
+      last_write_checksum: sha1:8243b0bcf735a67d4cffb254fe9de95f130a0d8a
+      pristine_git_object: c1b1effcbe3b093f7dede49684cf88aa0a9b27a7
+    src/mistralai/models/deltamessage.py:
+      id: 43ee8a48546e
+      last_write_checksum: sha1:8bc50b7943d5ae4725eb57b7ca21a4c1217e4c0d
+      pristine_git_object: 88aefe7f652296c02377714586d38b8e318a419d
+    src/mistralai/models/documentlibrarytool.py:
+      id: 24c1c0293181
+      last_write_checksum: sha1:7ec74875595149f433ee1b8a95d8183aa1cf8738
+      pristine_git_object: 8d4c122b0412682a792c754a06e10809bfd8c25c
+    src/mistralai/models/documentout.py:
+      id: 205cb7721dfa
+      last_write_checksum: sha1:9316ed725bd9d7a2ef1f4e856f61def684442bd7
+      pristine_git_object: 81d9605f38e40a703911fefc15731ec102c74ccb
+    src/mistralai/models/documenttextcontent.py:
+      id: 685680d8640b
+      last_write_checksum: sha1:dafce4998fa5964ac6833e71f7cb4f23455c14e6
+      pristine_git_object: c02528c2052d535f7c815fb1165df451d49fef79
+    src/mistralai/models/documentupdatein.py:
+      id: 6d69a91f40bd
+      last_write_checksum: sha1:dcbc51f1a1192bb99732405420e57fedb32dd1de
+      pristine_git_object: bd89ff4793e4fd78a4bae1c9f5aad716011ecbfd
+    src/mistralai/models/documenturlchunk.py:
+      id: 34a86f25f54f
+      last_write_checksum: sha1:1496b3d587fd2c5dc1c3f18de1ac59a29c324849
+      pristine_git_object: 6d0b1dc6c9f6ebca8638e0c8991a9aa6df2b7e48
+    src/mistralai/models/embeddingdtype.py:
+      id: bca8ae3779ed
+      last_write_checksum: sha1:962f629fa4ee8a36e731d33f8f730d5741a9e772
+      pristine_git_object: 26eee779e12ae8114a90d3f18f99f3dd50e46b9e
+    src/mistralai/models/embeddingrequest.py:
+      id: ccb2b16068c8
+      last_write_checksum: sha1:46cd92d04646e91b617ef498cb2a105fa4b8f52e
+      pristine_git_object: 4af890a335bb4e32308fe288fe27e3cec6d3a443
+    src/mistralai/models/embeddingresponse.py:
+      id: c38279b9f663
+      last_write_checksum: sha1:369740f705b08fede21edc04adf86505e55c9b76
+      pristine_git_object: aae6fa60e131d4378bc631576b18f4d8a47f2770
+    src/mistralai/models/embeddingresponsedata.py:
+      id: b73c5696eb71
+      last_write_checksum: sha1:9709503bdde0a61603237fe6e84c410467e7e9f4
+      pristine_git_object: 01e2765fb206b0ee36dfeb51cf3066613c74ac13
+    src/mistralai/models/encodingformat.py:
+      id: 9f4fad7d5a9e
+      last_write_checksum: sha1:f9a3568cd008edb02f475a860e5849d9a40d0246
+      pristine_git_object: be6c1a14e4680e24e70b8bbda018759056b784ca
+    src/mistralai/models/entitytype.py:
+      id: 4d056950d537
+      last_write_checksum: sha1:7087fb7ad2886188380cd692997b2850c950a6b8
+      pristine_git_object: 8d2d4bbe837da3e21988548e09710ab629d1aacd
+    src/mistralai/models/eventout.py:
+      id: 2601c7113273
+      last_write_checksum: sha1:93ba178c3f6459dbc638e49c3eddcc188c7ff5d0
+      pristine_git_object: 3281903429b154eb095a7c41b1751cfef97e497d
+    src/mistralai/models/file.py:
+      id: 7c1aa0c610c0
+      last_write_checksum: sha1:3735ec925554b397e36fd2322062f555fbcde270
+      pristine_git_object: 682d7f6e24b736dabd0566ab1b45b20dae5ea019
+    src/mistralai/models/filechunk.py:
+      id: ea6a1ad435e8
+      last_write_checksum: sha1:56d91860c1c91c40662313ea6f156db886bb55b6
+      pristine_git_object: 83e60cef29045ced5ae48b68481bce3317690b8e
+    src/mistralai/models/filepurpose.py:
+      id: 3928b3171a09
+      last_write_checksum: sha1:2ffb9fd99624b7b9997f826526045a9a956fde14
+      pristine_git_object: b109b35017d5aa086ac964d78163f41e64277874
+    src/mistralai/models/files_api_routes_delete_fileop.py:
+      id: fa02d4d126c7
+      last_write_checksum: sha1:c96b106d6496087673f6d1b914e748c49ec13755
+      pristine_git_object: a84a7a8eee4b6895bb2e835f82376126b3e423ec
+    src/mistralai/models/files_api_routes_download_fileop.py:
+      id: 1dc2e2823a00
+      last_write_checksum: sha1:6001bcf871ab76635abcb3f081b029c8154a191e
+      pristine_git_object: 168a7fa6701578b77876fe0bddeb1003d06f33b7
+    src/mistralai/models/files_api_routes_get_signed_urlop.py:
+      id: 628ed2f82ce4
+      last_write_checksum: sha1:c970025b1e453ad67298d12611542abb46ded54d
+      pristine_git_object: 708d40ab993f93227b9795c745383ab954c1c89c
+    src/mistralai/models/files_api_routes_list_filesop.py:
+      id: 865dd74c577c
+      last_write_checksum: sha1:079f828a7ea8f3b9724e6e3661b261f4c2843a5f
+      pristine_git_object: 8e174a58f0ea1d71f79f48ab26de29a65767c275
+    src/mistralai/models/files_api_routes_retrieve_fileop.py:
+      id: d821f72ee198
+      last_write_checksum: sha1:d0d07123fd941bb99a00a36e87bc7ab4c21506a6
+      pristine_git_object: 0c2a95ef590f179fe60a19340e34adb230dd8901
+    src/mistralai/models/files_api_routes_upload_fileop.py:
+      id: ccca25a2fe91
+      last_write_checksum: sha1:15aab76863daa16a8359745e846c7a5acce78584
+      pristine_git_object: 34321cf538b265653598d3d0d8bb4d4940d1090a
+    src/mistralai/models/fileschema.py:
+      id: 8a02ff440be5
+      last_write_checksum: sha1:4d4b230971c0b1abf211f6b40ce447eb2bb93970
+      pristine_git_object: 7c7b60c6302f0ec7ec689260a937ac356931a912
+    src/mistralai/models/filesignedurl.py:
+      id: 6fe55959eedd
+      last_write_checksum: sha1:afbe1cdfbdf2f760fc996a5065c70fa271a35885
+      pristine_git_object: 092be7f8090272bdebfea6cbda7b87d9877d59e8
+    src/mistralai/models/fimcompletionrequest.py:
+      id: a54284b7041a
+      last_write_checksum: sha1:7e477e032b3a48fe08610dd5dc50dee0948950e9
+      pristine_git_object: 801a358b02441b7537f4bae64e93b4308c720040
+    src/mistralai/models/fimcompletionresponse.py:
+      id: 15f25c04c5dd
+      last_write_checksum: sha1:b7787a7dc82b31ed851a52ae2f0828cc8746d61e
+      pristine_git_object: f27972b9e6e2f9dc7837be7278fda4910755f1f4
+    src/mistralai/models/fimcompletionstreamrequest.py:
+      id: ba6b92828dc7
+      last_write_checksum: sha1:a8f2c6cbd5a41ad85b7d0faced90d8f05b29f646
+      pristine_git_object: 2e8e6db2a21a86ffd7cc61f92fed5c55f19e2e50
+    src/mistralai/models/finetuneablemodeltype.py:
+      id: cbd439e85b18
+      last_write_checksum: sha1:8694f016e8a4758308225b92b57bee162accf9d7
+      pristine_git_object: f5b8b2ed45b56d25b387da44c398ae79f3a52c73
+    src/mistralai/models/ftclassifierlossfunction.py:
+      id: 95255316968d
+      last_write_checksum: sha1:69e08ab728e095b8e3846ed2dc142aa1e82a864a
+      pristine_git_object: c4ef66e0fe69edace4912f2708f69a6e606c0654
+    src/mistralai/models/ftmodelcapabilitiesout.py:
+      id: 1bc9230e1852
+      last_write_checksum: sha1:c841f76ba219c82e3324b69ad8eba4abd522d0b9
+      pristine_git_object: 7f3aa18b982c11fb6463e96333250b632dd195c8
+    src/mistralai/models/ftmodelcard.py:
+      id: 4f25bcf18e86
+      last_write_checksum: sha1:f1d80e6aa664e63b4a23a6365465d42415fc4bbb
+      pristine_git_object: 1c3bd04da0cc2bc86bec97d7890ad6594879b334
+    src/mistralai/models/function.py:
+      id: 66b7b7ab8fc4
+      last_write_checksum: sha1:5da05a98ca5a68c175bd212dd41127ef98013da6
+      pristine_git_object: 7d40cf758ffbb3b6b4e62b50274829bd1c809a9c
+    src/mistralai/models/functioncall.py:
+      id: 5e03760bb753
+      last_write_checksum: sha1:20d2a8196b6ccaffe490b188b1482a309b2dce79
+      pristine_git_object: 0cce622a4835fcbd9425928b115a707848c65f54
+    src/mistralai/models/functioncallentry.py:
+      id: 1d5c6cef6e92
+      last_write_checksum: sha1:f357b1fde226c52c0dc2b105df66aeb6d17ab1bf
+      pristine_git_object: 4ea62c4ffc671b20d35cd967f3da0f1a34c92e2e
+    src/mistralai/models/functioncallentryarguments.py:
+      id: bd63a10181da
+      last_write_checksum: sha1:6beb9aca5bfc2719f357f47a5627c9edccef051f
+      pristine_git_object: ac9e6227647b28bfd135c35bd32ca792d8dd414b
+    src/mistralai/models/functioncallevent.py:
+      id: 868025c914c8
+      last_write_checksum: sha1:4eb5b07218c9ab923cbe689e3de116d14281a422
+      pristine_git_object: e3992cf173907a485ced9ec12323a680613e9e6a
+    src/mistralai/models/functionname.py:
+      id: 46a9b195fef5
+      last_write_checksum: sha1:2219be87b06033dad9933b2f4efd99a4758179f1
+      pristine_git_object: 0a6c0b1411b6f9194453c9fe22d52d035eb80c4f
+    src/mistralai/models/functionresultentry.py:
+      id: d617bbe28e36
+      last_write_checksum: sha1:a781805577eb871b4595bae235c1d25e2e483fdc
+      pristine_git_object: 1c61395a82830dc689f2e011b9e6c86eba58cda3
+    src/mistralai/models/functiontool.py:
+      id: e1b3d619ef0b
+      last_write_checksum: sha1:31e375a2222079e9e70459c55ff27a8b3add869d
+      pristine_git_object: 009fe28008a166d551566378e3c2730963aca591
+    src/mistralai/models/githubrepositoryin.py:
+      id: e7f21180a768
+      last_write_checksum: sha1:b4f630e15057e4ff8bfc5fb7ba2f0085a76c5f06
+      pristine_git_object: b16ce0d2898b000f08e3d960a3411941a2324473
+    src/mistralai/models/githubrepositoryout.py:
+      id: a3e494bbd813
+      last_write_checksum: sha1:00a9bc4d6308cd960077fb639b1778723a71f583
+      pristine_git_object: 372477c106a37b1b9d5cec02751c63fb08abcf53
+    src/mistralai/models/httpvalidationerror.py:
+      id: 224ee4b3f0f0
+      last_write_checksum: sha1:3f8d51b670993863fcd17421d1ace72e8621fd51
+      pristine_git_object: d467577af04921f5d9bfa906ae6f4e06055a8785
+    src/mistralai/models/imagegenerationtool.py:
+      id: 63bbe395acb2
+      last_write_checksum: sha1:404e9cbabada212b87cc2e0b8799a18ff1cecf95
+      pristine_git_object: a92335dbd2d0d03be5c2df4132df1cc26eaf38dd
+    src/mistralai/models/imageurl.py:
+      id: 20116779b5a0
+      last_write_checksum: sha1:2d6090577370f5eb2e364029a11bb61bd86ef226
+      pristine_git_object: 6f077b69019fbc598ddc402ba991c83f8a047632
+    src/mistralai/models/imageurlchunk.py:
+      id: 0a6e87c96993
+      last_write_checksum: sha1:0b7e4c0d5129698b1b01608eb59b27513f6a9818
+      pristine_git_object: 8e8aac4238381527d9156fcb72288b28a82f9689
+    src/mistralai/models/inputentries.py:
+      id: cbf378d5b92a
+      last_write_checksum: sha1:afc03830974af11516c0b997f1cd181218ee4fb0
+      pristine_git_object: 8ae29837a6c090fbe1998562684d3a372a9bdc31
+    src/mistralai/models/inputs.py:
+      id: a53031bc9cb6
+      last_write_checksum: sha1:94290a72cb6cfa40813bc79a66a463978ae9ae1c
+      pristine_git_object: 34d20f3428a5d994c4a199c411dc8097b3c259d7
+    src/mistralai/models/instructrequest.py:
+      id: d23d1da148c8
+      last_write_checksum: sha1:2c4f4babc9944f90bc725bb0c460c8de85b3d75e
+      pristine_git_object: dddbda00a418629462e3df12a61a6b1c56c1d2bd
+    src/mistralai/models/jobin.py:
+      id: 42f6df34c72e
+      last_write_checksum: sha1:e5a78c9a2cd48fb1d7d062ec2f8d54f8d3ac493e
+      pristine_git_object: aa0cd06c704902919f672e263e969630df783ef6
+    src/mistralai/models/jobmetadataout.py:
+      id: eaa2e54e2e2b
+      last_write_checksum: sha1:90afd144e2f9ec77c3be2694db1d96e4bc23fecb
+      pristine_git_object: 10ef781ebbba4c5eaab6f40f5d5f9f828944c983
+    src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py:
+      id: 5d3a14d60da7
+      last_write_checksum: sha1:4925f408587e91581c0181baf9acd1dcb5a50768
+      pristine_git_object: 5b83d534d7efd25c0bc47406c79dfd59e22ec1d6
+    src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py:
+      id: 74c718778882
+      last_write_checksum: sha1:82b57873fe45307e40e59ce0f9f6fbcd48140d7b
+      pristine_git_object: d9c7b398806a85fc2a9d937de734c61cff43e071
+    src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py:
+      id: 072c77cfbaa5
+      last_write_checksum: sha1:f890bc21fa71e33a930d48cdbf18fd503419406c
+      pristine_git_object: c48246d54c696bd85fbe67348d5eef1a2a1944db
+    src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py:
+      id: db002a822be0
+      last_write_checksum: sha1:3a1019f200193556df61cbe3786b03c2dbab431f
+      pristine_git_object: d728efd175f1df6b59b74d0b2fa602c0e0199897
+    src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py:
+      id: ad69f51c764d
+      last_write_checksum: sha1:c84477987738a389ddf88546060263ecfb46506a
+      pristine_git_object: ceb19a69131958a2de6c3e678c40a1ca5d35fd73
+    src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py:
+      id: a5c2c6e89b85
+      last_write_checksum: sha1:dfb755d386e7c93540f42392f18efae7f61c4625
+      pristine_git_object: 39af3ea6fab66941faf7718d616ff2a386e8219b
+    src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py:
+      id: 221ec5d0482f
+      last_write_checksum: sha1:f2ce2c6a8924deda372d749ea2a09a2526b8da44
+      pristine_git_object: be99dd2d329f5921513ba3ad6e5c5a9807d1a363
+    src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py:
+      id: bd0fd94f34fc
+      last_write_checksum: sha1:48390cf76ffc1d712e33bd0bcece8dea956e75cb
+      pristine_git_object: 9aec8eb25c54e8fecedd9dd9e823ccf32c1a36b8
+    src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py:
+      id: cba224459ae6
+      last_write_checksum: sha1:238eeb9b7f48ff4e3262cc0cc5e55d96fe565073
+      pristine_git_object: 8103b67b55eab0f9197cd9fb421e6ea4ca10e76e
+    src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py:
+      id: ecc5a3420980
+      last_write_checksum: sha1:8e026bc610fead1e55886c741f6b38817bb6b2ff
+      pristine_git_object: a84274ff5b2c45f2adc2c0234db090c498decc51
+    src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py:
+      id: 3e8d8e70d526
+      last_write_checksum: sha1:a5538fcb4248fd83749dc303f9585d7354ff8b92
+      pristine_git_object: a10528ca0f7056ef82e0aeae8f4262c65e47791d
+    src/mistralai/models/jobsout.py:
+      id: bb1000b03e73
+      last_write_checksum: sha1:d06d7b33e5630d45795efc2a8443ae3070866b07
+      pristine_git_object: 680b1d582bc8fbce17a381be8364333dd87ce333
+    src/mistralai/models/jsonschema.py:
+      id: 4bcf195c31bb
+      last_write_checksum: sha1:a0d2b72f809e321fc8abf740e57ec39a384c09d4
+      pristine_git_object: e2b6a45e5e5e68b6f562dc39519ab12ffca50322
+    src/mistralai/models/legacyjobmetadataout.py:
+      id: 172ade2efb26
+      last_write_checksum: sha1:bf608218a88f7e59cd6c9d0958940b68a200ba0d
+      pristine_git_object: 499512197a9f9600ac9f7cee43f024dde67fd775
+    src/mistralai/models/libraries_delete_v1op.py:
+      id: ef50051027ec
+      last_write_checksum: sha1:2a9632da75355679918714a68b96e3ddf88fa5d3
+      pristine_git_object: 56f8f8a8706b7aac67cf9b156a2e8710a4fdef36
+    src/mistralai/models/libraries_documents_delete_v1op.py:
+      id: e18557420efe
+      last_write_checksum: sha1:6904ea388795a0b5f523959c979cf9b3a2c3ef4e
+      pristine_git_object: c33710b0e29664594891055c36199ea4846516dc
+    src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py:
+      id: c8df3283cb98
+      last_write_checksum: sha1:fefde9e22a010f900bd9012a2d438f909d54815f
+      pristine_git_object: e2459c1c68c81eb67983ac76de23dd8609420291
+    src/mistralai/models/libraries_documents_get_signed_url_v1op.py:
+      id: 279ac5d9f945
+      last_write_checksum: sha1:8ee5b6386f98d2af619f070e83e1f3772c07e199
+      pristine_git_object: bc913ba56bd98d9937ddd5516837b5a8ead10454
+    src/mistralai/models/libraries_documents_get_status_v1op.py:
+      id: ded8f142264f
+      last_write_checksum: sha1:ac1f85ecb74ef43e6e831794badbbd57e99f7028
+      pristine_git_object: 08992d7c9ee5ba85ef97971fa6e06af465e39fa9
+    src/mistralai/models/libraries_documents_get_text_content_v1op.py:
+      id: 497b693d0ba6
+      last_write_checksum: sha1:11eeb61bab8b745ba22f2087393ba0cf91b76180
+      pristine_git_object: 21a131ad6448597a996f7d96723f6bc8cf12ddf0
+    src/mistralai/models/libraries_documents_get_v1op.py:
+      id: 7b1e6957ca40
+      last_write_checksum: sha1:a3e3d1dee18ee2900417db836b1f8b49a14e0501
+      pristine_git_object: ff2bdedbcaa8cf4c8e31091ed529274bf5d3ec04
+    src/mistralai/models/libraries_documents_list_v1op.py:
+      id: d5cc573ae1a0
+      last_write_checksum: sha1:43b6af0f23ff88d6e13f48acf12baa01a03eb243
+      pristine_git_object: e6ff29cf4edb7b269cd66c5299b7531b13973dd2
+    src/mistralai/models/libraries_documents_reprocess_v1op.py:
+      id: 3e832394e71b
+      last_write_checksum: sha1:36ced698b57573338eb95f5d70983ba4b9dcb0e0
+      pristine_git_object: 861993e7e0fd06576e878758a44029613d381a4c
+    src/mistralai/models/libraries_documents_update_v1op.py:
+      id: 902a2c649e04
+      last_write_checksum: sha1:c8ba64250a66dbdd9ac409ffeccb6bb75ba619c2
+      pristine_git_object: 5551d5eec7961a5cc0fa9018ba680304e1f99d57
+    src/mistralai/models/libraries_documents_upload_v1op.py:
+      id: a4586d35c41c
+      last_write_checksum: sha1:83c40a6b1a790d292c72c90847926d458ea73d83
+      pristine_git_object: 51f536cca6141b0243d3c3fff8da3224a0c51ea5
+    src/mistralai/models/libraries_get_v1op.py:
+      id: ed8ae2dc35b4
+      last_write_checksum: sha1:c9dc682319790ec77c3827b44e3e8937de0de17f
+      pristine_git_object: b87090f6bb56c7f7d019483c0e979f9f2fdc3378
+    src/mistralai/models/libraries_share_create_v1op.py:
+      id: 6a5d94d8a3dc
+      last_write_checksum: sha1:312ec2ea1635e86da293a0f402498031591c9854
+      pristine_git_object: a8b0e35db9a452a62dbc0893009a9708684d2a23
+    src/mistralai/models/libraries_share_delete_v1op.py:
+      id: 474f847642a7
+      last_write_checksum: sha1:557000669df73a160d83bcaaf456579890fa7f92
+      pristine_git_object: e29d556a73a87a6f799948f05517a50545dfd79e
+    src/mistralai/models/libraries_share_list_v1op.py:
+      id: 5ccdc4491119
+      last_write_checksum: sha1:c3ca37074f14aad02a9d01099fe7134204d5520e
+      pristine_git_object: b276d756e95e9e7dc53cd7ff5da857052c055046
+    src/mistralai/models/libraries_update_v1op.py:
+      id: 6de043d02383
+      last_write_checksum: sha1:0936d1273af7659d7283c1defc2094178bc58003
+      pristine_git_object: c93895d97f165d4fa4cc33097f6b772b55337623
+    src/mistralai/models/libraryin.py:
+      id: 0277ef6b7a58
+      last_write_checksum: sha1:56e033aef199fd831da7efff829c266206134f99
+      pristine_git_object: 872d494d66abde55130a6d2a6c30de950f51232c
+    src/mistralai/models/libraryinupdate.py:
+      id: 96904d836434
+      last_write_checksum: sha1:50c13a51aee5fc6c562090dad803ca6b3a1a5bed
+      pristine_git_object: 6e8ab81acae479e5fb999c91bfc55f6e1cbee5cc
+    src/mistralai/models/libraryout.py:
+      id: e483109c6e21
+      last_write_checksum: sha1:6394431205bd4c308de4ee600e839ac0c6624fc0
+      pristine_git_object: d3bc36f94735fbabb23d6c19ff481e404227f548
+    src/mistralai/models/listdocumentout.py:
+      id: 872891f10a41
+      last_write_checksum: sha1:61f444f7318e20921ddda1efd1e63e9bbec1d93d
+      pristine_git_object: 9d39e0873f463cce5fca723a3c85f47cf0f6ddeb
+    src/mistralai/models/listfilesout.py:
+      id: 43a961a42ca8
+      last_write_checksum: sha1:d3e0d056a8337adaffced63e2ed5b4b37a60927d
+      pristine_git_object: 2f82b37db7f3cb69d68ab097f9f75488939f66c8
+    src/mistralai/models/listlibraryout.py:
+      id: dcd1a940efe5
+      last_write_checksum: sha1:7dc2876bf50861c8e94079859725cadf2d7b14c4
+      pristine_git_object: 1e647fe1db65421d73ba6e0f35cc580e99ea7212
+    src/mistralai/models/listsharingout.py:
+      id: c04e23806a57
+      last_write_checksum: sha1:efd9e780445bdcf4a4e7794cd1aedaa85067f904
+      pristine_git_object: 38c0dbe0ab9aeb3c977e38f2bf95d84297456980
+    src/mistralai/models/messageentries.py:
+      id: 2e456a2494da
+      last_write_checksum: sha1:7c2503f4a1be5e5dde5640dd7413fed06aee09b4
+      pristine_git_object: 9b1706dee5fbb5cae18674708a1571b187bf0576
+    src/mistralai/models/messageinputcontentchunks.py:
+      id: 344669e96a85
+      last_write_checksum: sha1:740e82b72d5472f0cc967e745c07393e2df8ae38
+      pristine_git_object: e90d8aa0317e553bfc0cceb4a356cf9994ecfb60
+    src/mistralai/models/messageinputentry.py:
+      id: 2e0500be6230
+      last_write_checksum: sha1:118ffb7715993d7c103be5d26894ce33d8437f8a
+      pristine_git_object: edf05631be8d89002fd3a3bfb3034a143b12ed21
+    src/mistralai/models/messageoutputcontentchunks.py:
+      id: e8bb72ef0c0f
+      last_write_checksum: sha1:f239151ae206f6e82ee3096d357ff33cf9a08138
+      pristine_git_object: 136a7608e7e2a612d48271a7c257e2bb383584f3
+    src/mistralai/models/messageoutputentry.py:
+      id: 0113bf848952
+      last_write_checksum: sha1:3a1569ef7b3efadb87418d3ed38a6df0710cca1b
+      pristine_git_object: 0e2df81e3e75841d31bafd200697e9fd236b6fbe
+    src/mistralai/models/messageoutputevent.py:
+      id: d194af351767
+      last_write_checksum: sha1:b9c4bf8db3d22d6b01d79044258729b5daafc050
+      pristine_git_object: 751767a31666e839ec35d722707d97db605be25f
+    src/mistralai/models/metricout.py:
+      id: "369168426763"
+      last_write_checksum: sha1:d245a65254d0a142a154ee0f453cd7b64677e666
+      pristine_git_object: 930b5c2181d4c5c5d89474b66fc1a4eef7ca7865
+    src/mistralai/models/mistralerror.py:
+      id: 89288c78040b
+      last_write_checksum: sha1:07fce1e971a25d95ffa8c8f3624d62cdf96e353a
+      pristine_git_object: 28cfd22dc3d567aa4ae55cc19ad89341fa9c96a1
+    src/mistralai/models/mistralpromptmode.py:
+      id: b2580604c1fe
+      last_write_checksum: sha1:1ac4d9fb8fbf0b21958be5483a569da7f1f49ff0
+      pristine_git_object: ee82fb6d056e2d9699628698750e68b4ab6ef851
+    src/mistralai/models/modelcapabilities.py:
+      id: a9589b97b15c
+      last_write_checksum: sha1:d7a7d530750418a54a5fc1698d855df7a519a45c
+      pristine_git_object: 4b5d5da7da9573f998e977e8a14a9b8f8cbf4f55
+    src/mistralai/models/modelconversation.py:
+      id: 7d8b7b8d62a8
+      last_write_checksum: sha1:b76cc407f807c19c1ff5602f7dd1d0421db2486d
+      pristine_git_object: 8eca4f973cd20e8bcb70a519f8dc3749878f04a2
+    src/mistralai/models/modellist.py:
+      id: 22085995d513
+      last_write_checksum: sha1:f753c11b430f8dd4daffb60bef467c6fa20f5e52
+      pristine_git_object: 394cb3fa66a8881b013f78f1c8ee5440c9933427
+    src/mistralai/models/moderationobject.py:
+      id: de835c5cd36e
+      last_write_checksum: sha1:24befa2934888192a12d9954749b8e591eb22582
+      pristine_git_object: 5eff2d2a100c96eb7491ca99716fc9523fb74643
+    src/mistralai/models/moderationresponse.py:
+      id: 831711e73705
+      last_write_checksum: sha1:a96af206b8cd7c161c77cde0d3720880f20cf7f8
+      pristine_git_object: ed13cd6bc226e8e505ef248760374c795705440f
+    src/mistralai/models/no_response_error.py:
+      id: 3102fe819ad6
+      last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f
+      pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366
+    src/mistralai/models/ocrimageobject.py:
+      id: 44523566cf03
+      last_write_checksum: sha1:75bb3b2eec938bd59052ea85244130770d787cbf
+      pristine_git_object: cec0acf4104ba7153270a1130ac2ac58a171b147
+    src/mistralai/models/ocrpagedimensions.py:
+      id: 0d8589f80c1a
+      last_write_checksum: sha1:d62f216c61756592e6cde4a5d72b68eedeaddcc5
+      pristine_git_object: d1aeb54d869545aec3ecaad1240f1be2059280f1
+    src/mistralai/models/ocrpageobject.py:
+      id: 2dfef21e786f
+      last_write_checksum: sha1:667013bdfafb5ed0867fa9cd350455f66fee3e90
+      pristine_git_object: 737defbaea323e0f3ccd95c2a721f57acc9f43a0
+    src/mistralai/models/ocrrequest.py:
+      id: 7dbc4bb7cafb
+      last_write_checksum: sha1:b8a5efbd582bdf9e188d4777b319d2b16e0caf3d
+      pristine_git_object: 8bd133706746435af17898ee1afe78d94f2d1948
+    src/mistralai/models/ocrresponse.py:
+      id: a187e70d8c2e
+      last_write_checksum: sha1:0c09aee803a5e1a3ba7c7f5d0ce46e96ee3339ca
+      pristine_git_object: 7b65bee7e6c0fffc7019f7843dcf88c0b5fade4e
+    src/mistralai/models/ocrtableobject.py:
+      id: 1be0c3cc027f
+      last_write_checksum: sha1:804d15ad21276f47f5ea9beccab9e471840ac32e
+      pristine_git_object: 5f30ab5e15dabf6a96498f46cf6178dca7fdb906
+    src/mistralai/models/ocrusageinfo.py:
+      id: 91ab3d4cd57a
+      last_write_checksum: sha1:018eaf85ebffbb3392ed3c6688a41882a0893015
+      pristine_git_object: 36c9f826cc64f67b254bdd07b00ad77857a91e1c
+    src/mistralai/models/outputcontentchunks.py:
+      id: 25ae74f4c9b8
+      last_write_checksum: sha1:47d74d212ebcb68ff75a547b3221f2aee3e07bfe
+      pristine_git_object: ad0c087e0dcbe302dd9c73d1ea03e109e9a66ef7
+    src/mistralai/models/paginationinfo.py:
+      id: 7e6919dfd6b1
+      last_write_checksum: sha1:5ae05b383e9381862b8a980d83e73765b726294d
+      pristine_git_object: 00d4f1ec906e8485fdcb3e4b16a0b01acfa2be4b
+    src/mistralai/models/prediction.py:
+      id: ad77ec075e6d
+      last_write_checksum: sha1:d359ab3a37229212459228329219a1ec26a0381d
+      pristine_git_object: 582d87896b477de867cadf5e85d58ee71c445df3
+    src/mistralai/models/processingstatusout.py:
+      id: 54d1c125ef83
+      last_write_checksum: sha1:475749250ada2566c5a5d769eda1d350ddd8be8f
+      pristine_git_object: e67bfa865dcf94656a67f8612a5420f8b43cc0ec
+    src/mistralai/models/referencechunk.py:
+      id: 6cdbb4e60749
+      last_write_checksum: sha1:48a4dddda06aadd16f6ea34c58848430bd561432
+      pristine_git_object: 1864ac794d4e637556003cbb2bf91c10832d90f9
+    src/mistralai/models/requestsource.py:
+      id: 1836766b9e81
+      last_write_checksum: sha1:31aae791bf737ad123fe189227d113838204ed42
+      pristine_git_object: 7b0a35c44050b6fca868479e261805a77f33e230
+    src/mistralai/models/responsedoneevent.py:
+      id: 6300eaecde3c
+      last_write_checksum: sha1:693d832a480e943ff9c3e4f6822bea8358750ee1
+      pristine_git_object: 5a3a3dfb8630713a618cc23f97660840e4fbbeca
+    src/mistralai/models/responseerrorevent.py:
+      id: 88185105876c
+      last_write_checksum: sha1:5adfc1acdba4035f1a646a7678dd09e16d05e747
+      pristine_git_object: 6cb1b26885ad9ded4f75f226b0ce713206cb0a49
+    src/mistralai/models/responseformat.py:
+      id: 6d5e093fdba8
+      last_write_checksum: sha1:4c4a801671419f403263caafbd90dbae6e2203da
+      pristine_git_object: 92284017b5b895673e510a739bc5c5ed104de4af
+    src/mistralai/models/responseformats.py:
+      id: e5fccecf2b70
+      last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b
+      pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991
+    src/mistralai/models/responsestartedevent.py:
+      id: 37fbb3e37d75
+      last_write_checksum: sha1:1d1eb4b486b2b92d167367d6525a8ea709d00c15
+      pristine_git_object: d14d45ef8aa0d4e6dfa5893c52ae292f1f9a5780
+    src/mistralai/models/responsevalidationerror.py:
+      id: 4b46e43f015b
+      last_write_checksum: sha1:c90231f7d7d3e93d6a36972ec4bead76fcb9ac47
+      pristine_git_object: ed30165511c209289a030c5e9d9af1d2ad93d77c
+    src/mistralai/models/retrieve_model_v1_models_model_id_getop.py:
+      id: 81db6b688ded
+      last_write_checksum: sha1:8a7f0585855118e73fcd8f7213757172ac94c6fc
+      pristine_git_object: bfe62474610239f6e1ac0b5a4dc4b6ee9d321bd6
+    src/mistralai/models/retrievefileout.py:
+      id: 5cf73a0007f0
+      last_write_checksum: sha1:8f09f09299c2ac35f9d6ca9d091c01a2d3f6e087
+      pristine_git_object: 7d734b0fa929b84f4910735f7ca22d5ed33b56ff
+    src/mistralai/models/sampletype.py:
+      id: d1558bd8d355
+      last_write_checksum: sha1:fbfdf1616eb6b64d785c11f11a33fca794de19eb
+      pristine_git_object: efb43e9be278aa00cda9828c5c8cb3edabc68d0f
+    src/mistralai/models/sdkerror.py:
+      id: d3c914c3c63a
+      last_write_checksum: sha1:6d6dafaf73210b86ef2fea441e2e864752242737
+      pristine_git_object: 65c45cf1c2cb4047e3cce21538890e5f62136f0f
+    src/mistralai/models/security.py:
+      id: 88dd24d389d4
+      last_write_checksum: sha1:3d460b276d68380a64d8d91947981ce27d92e552
+      pristine_git_object: cf05ba8fbce8d7b9199396c41ccd4c218d71998b
+    src/mistralai/models/shareenum.py:
+      id: 371f676fce97
+      last_write_checksum: sha1:9061b04c7b26435911ea18b095d76400e1ab1698
+      pristine_git_object: 634ba4b7e800e134f209fa851391b1a49cd6fc97
+    src/mistralai/models/sharingdelete.py:
+      id: 334b4a8820ae
+      last_write_checksum: sha1:d3639265787b8de6d143228d7ef4b38c93a45ce7
+      pristine_git_object: d1cd70749c3ada428e7f784eb198ff00e7c5779e
+    src/mistralai/models/sharingin.py:
+      id: b762157651b7
+      last_write_checksum: sha1:885aa69fe92497c3ae0752f99f718f65cba6e702
+      pristine_git_object: d3ada343004ddadfc20bd469937fcab1d8b2a81a
+    src/mistralai/models/sharingout.py:
+      id: "198686162036"
+      last_write_checksum: sha1:ae269a353d6733ac81ab6a4f3ea3368eef2a99ec
+      pristine_git_object: 12455818a5c1f44538696015bee079bce9567cdc
+    src/mistralai/models/source.py:
+      id: 6f2e7cd2285e
+      last_write_checksum: sha1:b0fe76d6566e4573317ad4c862ddc11423a8bde7
+      pristine_git_object: cc3abce298c4b817081610238e489d4023ca6f3f
+    src/mistralai/models/ssetypes.py:
+      id: 7817469fd731
+      last_write_checksum: sha1:1901bf6feee92ac100113e0a98dc0abe6e769375
+      pristine_git_object: 796f0327cbb1372c1b2a817a7db39f8f185a59be
+    src/mistralai/models/systemmessage.py:
+      id: 0f0c7d12c400
+      last_write_checksum: sha1:6886cc2f9603aabf75289ccc895e23ad45e65dc7
+      pristine_git_object: 2b34607b39a1a99d6569985818a89d9e973f3cdd
+    src/mistralai/models/systemmessagecontentchunks.py:
+      id: 5a051e10f9df
+      last_write_checksum: sha1:bef0630a287d9000595a26049290b978c0816ddc
+      pristine_git_object: a1f04d1e5802521d4913b9ec1978c3b9d77ac38f
+    src/mistralai/models/textchunk.py:
+      id: 7dee31ce6ec3
+      last_write_checksum: sha1:5ae5f498eaf03aa99354509c7558de42f7933c0c
+      pristine_git_object: 6052686ee52d3713ddce08f22c042bab2569f4da
+    src/mistralai/models/thinkchunk.py:
+      id: 8d0ee5d8ba9c
+      last_write_checksum: sha1:34f0cc91e66cb0ad46331b4e0385534d13b9ee1c
+      pristine_git_object: 627ae4883698696774b7a285a73326a4509c6828
+    src/mistralai/models/timestampgranularity.py:
+      id: e0cb6c4efa2a
+      last_write_checksum: sha1:2b554048013632407c391444d972e29362751468
+      pristine_git_object: 02816df67dd326a17d27dc815c49c6e1172693b8
+    src/mistralai/models/tool.py:
+      id: c0a9b60b6cf1
+      last_write_checksum: sha1:8f561cf5429605e43df7761f1dc3c62f4325c320
+      pristine_git_object: 6e746df3234f8720eebd12413f10fe65753715f9
+    src/mistralai/models/toolcall.py:
+      id: 08f53b1090d7
+      last_write_checksum: sha1:743ad0c768cd32407e581a3f891017ade9dc4750
+      pristine_git_object: 92dbb4a97b8a5b778c02d78394cb98d3a296c2c6
+    src/mistralai/models/toolchoice.py:
+      id: de7498a868da
+      last_write_checksum: sha1:b25a6700a0b950f218c18321c1395ef71245d8a4
+      pristine_git_object: 3b7d60e0bf0d732345f8869e549738dd16deaa2e
+    src/mistralai/models/toolchoiceenum.py:
+      id: 580f382c7857
+      last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620
+      pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804
+    src/mistralai/models/toolexecutiondeltaevent.py:
+      id: 674ab6adad2e
+      last_write_checksum: sha1:002e73c21df7e785268d77bad00b7967a514ede7
+      pristine_git_object: 4fca46a80810a9976a0de70fef9e895be82fa921
+    src/mistralai/models/toolexecutiondoneevent.py:
+      id: 86a2329a500d
+      last_write_checksum: sha1:00174f618358d49546ff8725a6dc3a9aebe5926c
+      pristine_git_object: 621d55718957c766c796f6f98814ed917ccbaadc
+    src/mistralai/models/toolexecutionentry.py:
+      id: 41e2484af138
+      last_write_checksum: sha1:c05c9f72cf939d4da334489be57e952b2fbd68f9
+      pristine_git_object: 9f70a63b720b120283adc1292188f1f0dd8086a1
+    src/mistralai/models/toolexecutionstartedevent.py:
+      id: 0987fdd1cd45
+      last_write_checksum: sha1:beab5d913fb60fc98ec81dffb4636143e23286ec
+      pristine_git_object: 80dd5e97084cdedcdb2752491a61d8b2aadb091a
+    src/mistralai/models/toolfilechunk.py:
+      id: 275d194f5a7b
+      last_write_checksum: sha1:85d075a91db18d413530938902dffc9cb4371a2c
+      pristine_git_object: d3e9dcd4ded86676767565c1a9b312502f614a61
+    src/mistralai/models/toolmessage.py:
+      id: dff99c41aecf
+      last_write_checksum: sha1:19fbda605416fcc20f842b6d3067f64de2691246
+      pristine_git_object: ef917c4369a7459e70f04da2c20ed62b9316d9bc
+    src/mistralai/models/toolreferencechunk.py:
+      id: 5e3482e21a7e
+      last_write_checksum: sha1:a0f1e4624b6107594f2a7626b7f11465c88aa5a9
+      pristine_git_object: 48035484fea087dcd34dfdaf29651b413ccb6645
+    src/mistralai/models/tooltypes.py:
+      id: c4ef111ec45b
+      last_write_checksum: sha1:f9cd152556d95e9e197ac0c10f65303789e28bcb
+      pristine_git_object: f54893c259518313218d9ee307669c291a8c0cf8
+    src/mistralai/models/trainingfile.py:
+      id: 150e9031690e
+      last_write_checksum: sha1:f20266317087b92eb74ed8cd48e7477666faf9a8
+      pristine_git_object: 99bd49dd760960558be40adf138f9b4b95ee62d9
+    src/mistralai/models/transcriptionresponse.py:
+      id: b50f2e392e31
+      last_write_checksum: sha1:79d57bf44dbad0f364ac57ad967642271b7a7526
+      pristine_git_object: 54a98a5ba7b83a6b7f6a39046b400a61e9889898
+    src/mistralai/models/transcriptionsegmentchunk.py:
+      id: ccd6d5675b49
+      last_write_checksum: sha1:367abd8a8182d9db9f2b19540aed2b974ad7bbe2
+      pristine_git_object: aa30f053a624b25c7fd1739c05f406a81873ff60
+    src/mistralai/models/transcriptionstreamdone.py:
+      id: 42177659bf0f
+      last_write_checksum: sha1:5fda2b766b2af41749006835e45c95f708eddb28
+      pristine_git_object: e1b1ab3d6f257786a5180f6876f47d47414e7e72
+    src/mistralai/models/transcriptionstreamevents.py:
+      id: 9593874b7574
+      last_write_checksum: sha1:ace344cfbec0af2ad43b0b61ae444e34f9e9da99
+      pristine_git_object: 8207c03fef9d76ca7405b85d93c2f462eae22329
+    src/mistralai/models/transcriptionstreameventtypes.py:
+      id: e2e35365ad39
+      last_write_checksum: sha1:38b7334aebf400e1abb2b20b0f2890880f0fc2f7
+      pristine_git_object: 4a910f0abca2912746cac60fd5a16bd5464f2457
+    src/mistralai/models/transcriptionstreamlanguage.py:
+      id: 635759ec85f3
+      last_write_checksum: sha1:93e389c2c8b41e378cfe7f88f05d8312236024e6
+      pristine_git_object: 15b7514415e536bb04fd1a69ccea20615b5b1fcf
+    src/mistralai/models/transcriptionstreamsegmentdelta.py:
+      id: 83d02b065099
+      last_write_checksum: sha1:1f48714d450fff004f9cf24b81749848240fe722
+      pristine_git_object: d779ed837913c8c13a4599a06a2ed75afa827a48
+    src/mistralai/models/transcriptionstreamtextdelta.py:
+      id: ce0861d8affd
+      last_write_checksum: sha1:84a3b6c6d84a896e59e2874de59d812d3db657a5
+      pristine_git_object: daee151f4ceaaee6c224b6dd078b4dfb680495b3
+    src/mistralai/models/unarchiveftmodelout.py:
+      id: d758d3dee216
+      last_write_checksum: sha1:b60e3292d2c4e6bf1456649184eaef4c75732cfc
+      pristine_git_object: 55c0ea8aa841ecef08f64020f099353efbdbcf7d
+    src/mistralai/models/updateftmodelin.py:
+      id: dbf79e18efd0
+      last_write_checksum: sha1:aab40882f622a32054d73e33ca2be279bb880080
+      pristine_git_object: 1bd0eaf2eb9b3427da6f4581b36d4316c0d129bf
+    src/mistralai/models/uploadfileout.py:
+      id: 1fa81af96888
+      last_write_checksum: sha1:86c62388807efc506dee5dab1a7c7cac2dec06de
+      pristine_git_object: 8f9f1067a191bbb67c4950a8cf9beaced72833b1
+    src/mistralai/models/usageinfo.py:
+      id: 62e303fb96aa
+      last_write_checksum: sha1:7f81b8c11fb5076e03a9fa40865382c9b45b700e
+      pristine_git_object: cedad5c12a96418567294e91812bfd96dce875bf
+    src/mistralai/models/usermessage.py:
+      id: dd10edab3b81
+      last_write_checksum: sha1:a22b667ed90d8e34923d36422ef7ea6ae83d2dd7
+      pristine_git_object: 61590bed06e1a397a1166a04a0b2405b833d19ff
+    src/mistralai/models/validationerror.py:
+      id: 0c6798c22859
+      last_write_checksum: sha1:be4e31bc68c0eed17cd16679064760ac1f035d7b
+      pristine_git_object: e971e016d64237f24d86c171222f66575152fd1f
+    src/mistralai/models/wandbintegration.py:
+      id: a2f0944d8dbd
+      last_write_checksum: sha1:43a3c6f8d77cde042cfa129954f48c419d3fe1b9
+      pristine_git_object: 690538963550d6adaf291fab8344f317c3c9080e
+    src/mistralai/models/wandbintegrationout.py:
+      id: bfae63e4ff4c
+      last_write_checksum: sha1:843e286ce58f072f27e8cb67b4c4f35001ffe0f0
+      pristine_git_object: f5a9ba802b489f595bfc2578b9f3456b5230bdb3
+    src/mistralai/models/websearchpremiumtool.py:
+      id: "710695472090"
+      last_write_checksum: sha1:85a562f976a03e9a3a659018caa78d2e26caeef9
+      pristine_git_object: 3bbe753acb99f74f8eb7aa63a387f35714b0a259
+    src/mistralai/models/websearchtool.py:
+      id: d8f773002c11
+      last_write_checksum: sha1:1e48212c4cc43bf937a3d21837878a1722666a30
+      pristine_git_object: eeafecb4847e66075b64dc34512aaca7a045900b
+    src/mistralai/models_.py:
+      id: dfcd71fd4c33
+      last_write_checksum: sha1:076e72b91c364f1a4905092b02e2ad7ebf7765c6
+      pristine_git_object: d44930a0db06117ba538424273935016a133e0ae
+    src/mistralai/ocr.py:
+      id: e23da68c9ae8
+      last_write_checksum: sha1:dbcaf037e0efeb2709fafb748ea0a02cb1076597
+      pristine_git_object: a775511f7f70b2c1ebebcdeaf159b9048a96f896
+    src/mistralai/py.typed:
+      id: 3923b7c50c56
+      last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60
+      pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544
+    src/mistralai/sdk.py:
+      id: b2a76476b492
+      last_write_checksum: sha1:f0ce70fdd61fc69a6afb59a46b42719c14e429d8
+      pristine_git_object: c83b53e0445788e27d0e451030807f1c6b86560b
+    src/mistralai/sdkconfiguration.py:
+      id: e6e7f1fb8b52
+      last_write_checksum: sha1:63a0ae64777a9d39debeb6ef36ac6d71dadc6d80
+      pristine_git_object: 7e77925ddc9aa01c4e5f8ba2ca52aa7f32e89859
+    src/mistralai/transcriptions.py:
+      id: ba6b040274f2
+      last_write_checksum: sha1:d555d42ff344d77befbe0c8e41fb4b93d472d834
+      pristine_git_object: 33b0d83bc1edb5b929e9955145fbc99d164a325a
+    src/mistralai/types/__init__.py:
+      id: b89b8375c971
+      last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed
+      pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c
+    src/mistralai/types/basemodel.py:
+      id: 18149749a011
+      last_write_checksum: sha1:615d0b364fa924b0fef719958df34596cc7c1ae2
+      pristine_git_object: 231c2e37283a76082f1a064c7aae47f8ee4ee694
+    src/mistralai/utils/__init__.py:
+      id: 6f6ad3db2456
+      last_write_checksum: sha1:5d88356c206a32c694ae113178ca6325a9fc612a
+      pristine_git_object: 87192ddef2589a7778f85e1cd715d6d1e61ae0a6
+    src/mistralai/utils/annotations.py:
+      id: 76966ef1943a
+      last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc
+      pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6
+    src/mistralai/utils/datetimes.py:
+      id: a0aa72e39d40
+      last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341
+      pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664
+    src/mistralai/utils/enums.py:
+      id: 400af6d98484
+      last_write_checksum: sha1:786ba597f79dca6fbc0d87c591752bb8d775ecb7
+      pristine_git_object: c3bc13cfc48794c143a64667f02e7949a8ce3fcc
+    src/mistralai/utils/eventstreaming.py:
+      id: 7b58f8ceb28e
+      last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b
+      pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc
+    src/mistralai/utils/forms.py:
+      id: a584268d234f
+      last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295
+      pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7
+    src/mistralai/utils/headers.py:
+      id: 3b4141506f5a
+      last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935
+      pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a
+    src/mistralai/utils/logger.py:
+      id: e35e15a1b67e
+      last_write_checksum: sha1:23efbe8d8d3b9412877f3cd35b37477d0e460a2f
+      pristine_git_object: cc08930715f6f03a559a2f30c3a9482071a3e1e2
+    src/mistralai/utils/metadata.py:
+      id: 617f23c58d0d
+      last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4
+      pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d
+    src/mistralai/utils/queryparams.py:
+      id: 6d86b06d25db
+      last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59
+      pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41
+    src/mistralai/utils/requestbodies.py:
+      id: 09529564c402
+      last_write_checksum: sha1:e0a3a78158eba39880475d62d61be906625676b8
+      pristine_git_object: d5240dd5f5efffabbd9aefa2f4a349511a9c75b4
+    src/mistralai/utils/retries.py:
+      id: 3c8dad479e7d
+      last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607
+      pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13
+    src/mistralai/utils/security.py:
+      id: e8a6622acc38
+      last_write_checksum: sha1:d86d2fd73cbb4a77f72395c10fff1c700efcf42e
+      pristine_git_object: 3b8526bfdf5c9c871ed184a2ec785f7bc1ebe57e
+    src/mistralai/utils/serializers.py:
+      id: e3688f9815db
+      last_write_checksum: sha1:a0d184ace7371a14a7d005cca7f358a03e3d4b07
+      pristine_git_object: 378a14c0f86a867ca7b0eb7e620da82234c0ccc4
+    src/mistralai/utils/unmarshal_json_response.py:
+      id: 3bc4add4e1b6
+      last_write_checksum: sha1:0b7b57b8a97ff6bfbb4dea22d59b8aade9a487f2
+      pristine_git_object: 64d0b3a6c59921ac0a5fb05d52ba47d0b696ae0e
+    src/mistralai/utils/url.py:
+      id: 8aa618817e83
+      last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41
+      pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418
+    src/mistralai/utils/values.py:
+      id: 3b1394457cf4
+      last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553
+      pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3
 examples:
   list_models_v1_models_get:
     speakeasy-default-list-models-v1-models-get:
@@ -1531,4 +3709,738 @@ examples:
         application/json: {}
 examplesVersion: 1.0.2
 generatedTests: {}
-releaseNotes: "## SDK Changes Detected:\n* `mistral.beta.libraries.create()`: `response.owner_id` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.documents.get()`: `response` **Changed** **Breaking** :warning:\n* `mistral.models.list()`: \n * `response.data.[].[base].capabilities` **Changed**\n * `error.status[422]` **Removed** **Breaking** :warning:\n* `mistral.files.list()`: \n * `request.include_total` **Added**\n * `response.total` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.start()`: \n * `request` **Changed** **Breaking** :warning:\n * `response.outputs.[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.accesses.delete()`: \n * `request.org_id` **Changed**\n * `response.share_with_uuid` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.accesses.update_or_create()`: \n * `request.org_id` **Changed**\n * `response.share_with_uuid` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.append()`: \n * `request.inputs.[array].[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n * `response.outputs.[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.accesses.list()`: `response.data.[].share_with_uuid` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.restart()`: \n * `request` **Changed** **Breaking** :warning:\n * `response.outputs.[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.documents.update()`: \n * `request.attributes` **Added**\n * `response` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.documents.upload()`: `response` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.documents.list()`: \n * `request.filters_attributes` **Added**\n * `response.data.[]` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.update()`: `response.owner_id` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.delete()`: `response.owner_id` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.get()`: `response.owner_id` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.get_history()`: `response.entries.[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.list()`: `response.data.[].owner_id` **Changed** **Breaking** :warning:\n* `mistral.models.retrieve()`: `response.[base].capabilities` **Changed**\n* `mistral.agents.complete()`: `request.metadata` **Added**\n* `mistral.beta.agents.get()`: \n * `request.agent_version` **Added**\n * `response` **Changed**\n* `mistral.beta.agents.list()`: \n * `request` **Changed**\n * `response.[]` **Changed**\n* `mistral.beta.agents.update_version()`: `response` **Changed**\n* `mistral.beta.agents.delete()`: **Added**\n* `mistral.beta.conversations.list()`: \n * `request.metadata` **Added**\n * `response.[]` **Changed**\n* `mistral.beta.conversations.get()`: `response` **Changed**\n* `mistral.beta.agents.update()`: \n * `request` **Changed**\n * `response` **Changed**\n* `mistral.beta.conversations.delete()`: **Added**\n* `mistral.chat.complete()`: `request.metadata` **Added**\n* `mistral.fim.complete()`: `request.metadata` **Added**\n* `mistral.beta.agents.create()`: \n * `request.metadata` **Added**\n * `response` **Changed**\n* `mistral.ocr.process()`: \n * `request` **Changed**\n * `response.pages.[]` **Changed**\n"
+generatedFiles:
+  - .gitattributes
+  - .vscode/settings.json
+  - USAGE.md
+  - docs/models/agent.md
+  - docs/models/agentconversation.md
+  - docs/models/agentconversationobject.md
+  - docs/models/agentcreationrequest.md
+  - docs/models/agentcreationrequesttools.md
+  - docs/models/agenthandoffdoneevent.md
+  - docs/models/agenthandoffdoneeventtype.md
+  - docs/models/agenthandoffentry.md
+  - docs/models/agenthandoffentryobject.md
+  - docs/models/agenthandoffentrytype.md
+  - docs/models/agenthandoffstartedevent.md
+  - docs/models/agenthandoffstartedeventtype.md
+  - docs/models/agentobject.md
+  - docs/models/agentsapiv1agentsdeleterequest.md
+  - docs/models/agentsapiv1agentsgetrequest.md
+  - docs/models/agentsapiv1agentslistrequest.md
+  - docs/models/agentsapiv1agentsupdaterequest.md
+  - docs/models/agentsapiv1agentsupdateversionrequest.md
+  - docs/models/agentsapiv1conversationsappendrequest.md
+  - docs/models/agentsapiv1conversationsappendstreamrequest.md
+  - docs/models/agentsapiv1conversationsdeleterequest.md
+  - docs/models/agentsapiv1conversationsgetrequest.md
+  - docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md
+  - docs/models/agentsapiv1conversationshistoryrequest.md
+  - docs/models/agentsapiv1conversationslistrequest.md
+  - docs/models/agentsapiv1conversationsmessagesrequest.md
+  - docs/models/agentsapiv1conversationsrestartrequest.md
+  - docs/models/agentsapiv1conversationsrestartstreamrequest.md
+  - docs/models/agentscompletionrequest.md
+  - docs/models/agentscompletionrequestmessages.md
+  - docs/models/agentscompletionrequeststop.md
+  - docs/models/agentscompletionrequesttoolchoice.md
+  - docs/models/agentscompletionstreamrequest.md
+  - docs/models/agentscompletionstreamrequestmessages.md
+  - docs/models/agentscompletionstreamrequeststop.md
+  - docs/models/agentscompletionstreamrequesttoolchoice.md
+  - docs/models/agenttools.md
+  - docs/models/agentupdaterequest.md
+  - docs/models/agentupdaterequesttools.md
+  - docs/models/apiendpoint.md
+  - docs/models/archiveftmodelout.md
+  - docs/models/archiveftmodeloutobject.md
+  - docs/models/arguments.md
+  - docs/models/assistantmessage.md
+  - docs/models/assistantmessagecontent.md
+  - docs/models/assistantmessagerole.md
+  - docs/models/attributes.md
+  - docs/models/audiochunk.md
+  - docs/models/audiochunktype.md
+  - docs/models/audiotranscriptionrequest.md
+  - docs/models/audiotranscriptionrequeststream.md
+  - docs/models/basemodelcard.md
+  - docs/models/basemodelcardtype.md
+  - docs/models/batcherror.md
+  - docs/models/batchjobin.md
+  - docs/models/batchjobout.md
+  - docs/models/batchjoboutobject.md
+  - docs/models/batchjobsout.md
+  - docs/models/batchjobsoutobject.md
+  - docs/models/batchjobstatus.md
+  - docs/models/builtinconnectors.md
+  - docs/models/chatclassificationrequest.md
+  - docs/models/chatcompletionchoice.md
+  - docs/models/chatcompletionrequest.md
+  - docs/models/chatcompletionrequesttoolchoice.md
+  - docs/models/chatcompletionresponse.md
+  - docs/models/chatcompletionstreamrequest.md
+  - docs/models/chatcompletionstreamrequestmessages.md
+  - docs/models/chatcompletionstreamrequeststop.md
+  - docs/models/chatcompletionstreamrequesttoolchoice.md
+  - docs/models/chatmoderationrequest.md
+  - docs/models/chatmoderationrequestinputs.md
+  - docs/models/checkpointout.md
+  - docs/models/classificationrequest.md
+  - docs/models/classificationrequestinputs.md
+  - docs/models/classificationresponse.md
+  - docs/models/classificationtargetresult.md
+  - docs/models/classifierdetailedjobout.md
+  - docs/models/classifierdetailedjoboutintegrations.md
+  - docs/models/classifierdetailedjoboutjobtype.md
+  - docs/models/classifierdetailedjoboutobject.md
+  - docs/models/classifierdetailedjoboutstatus.md
+  - docs/models/classifierftmodelout.md
+  - docs/models/classifierftmodeloutmodeltype.md
+  - docs/models/classifierftmodeloutobject.md
+  - docs/models/classifierjobout.md
+  - docs/models/classifierjoboutintegrations.md
+  - docs/models/classifierjoboutjobtype.md
+  - docs/models/classifierjoboutobject.md
+  - docs/models/classifierjoboutstatus.md
+  - docs/models/classifiertargetin.md
+  - docs/models/classifiertargetout.md
+  - docs/models/classifiertrainingparameters.md
+  - docs/models/classifiertrainingparametersin.md
+  - docs/models/codeinterpretertool.md
+  - docs/models/codeinterpretertooltype.md
+  - docs/models/completionargs.md
+  - docs/models/completionargsstop.md
+  - docs/models/completionchunk.md
+  - docs/models/completiondetailedjobout.md
+  - docs/models/completiondetailedjoboutintegrations.md
+  - docs/models/completiondetailedjoboutjobtype.md
+  - docs/models/completiondetailedjoboutobject.md
+  - docs/models/completiondetailedjoboutrepositories.md
+  - docs/models/completiondetailedjoboutstatus.md
+  - docs/models/completionevent.md
+  - docs/models/completionftmodelout.md
+  - docs/models/completionftmodeloutobject.md
+  - docs/models/completionjobout.md
+  - docs/models/completionjoboutobject.md
+  - docs/models/completionresponsestreamchoice.md
+  - docs/models/completionresponsestreamchoicefinishreason.md
+  - docs/models/completiontrainingparameters.md
+  - docs/models/completiontrainingparametersin.md
+  - docs/models/content.md
+  - docs/models/contentchunk.md
+  - docs/models/conversationappendrequest.md
+  - docs/models/conversationappendrequesthandoffexecution.md
+  - docs/models/conversationappendstreamrequest.md
+  - docs/models/conversationappendstreamrequesthandoffexecution.md
+  - docs/models/conversationevents.md
+  - docs/models/conversationeventsdata.md
+  - docs/models/conversationhistory.md
+  - docs/models/conversationhistoryobject.md
+  - docs/models/conversationinputs.md
+  - docs/models/conversationmessages.md
+  - docs/models/conversationmessagesobject.md
+  - docs/models/conversationrequest.md
+  - docs/models/conversationresponse.md
+  - docs/models/conversationresponseobject.md
+  - docs/models/conversationrestartrequest.md
+  - docs/models/conversationrestartrequesthandoffexecution.md
+  - docs/models/conversationrestartstreamrequest.md
+  - docs/models/conversationrestartstreamrequesthandoffexecution.md
+  - docs/models/conversationstreamrequest.md
+  - docs/models/conversationstreamrequesthandoffexecution.md
+  - docs/models/conversationstreamrequesttools.md
+  - docs/models/conversationusageinfo.md
+  - docs/models/data.md
+  - docs/models/deletefileout.md
+  - docs/models/deletemodelout.md
+  - docs/models/deletemodelv1modelsmodeliddeleterequest.md
+  - docs/models/deltamessage.md
+  - docs/models/document.md
+  - docs/models/documentlibrarytool.md
+  - docs/models/documentlibrarytooltype.md
+  - docs/models/documentout.md
+  - docs/models/documenttextcontent.md
+  - docs/models/documentupdatein.md
+  - docs/models/documenturlchunk.md
+  - docs/models/documenturlchunktype.md
+  - docs/models/embeddingdtype.md
+  - docs/models/embeddingrequest.md
+  - docs/models/embeddingrequestinputs.md
+  - docs/models/embeddingresponse.md
+  - docs/models/embeddingresponsedata.md
+  - docs/models/encodingformat.md
+  - docs/models/entitytype.md
+  - docs/models/entries.md
+  - docs/models/eventout.md
+  - docs/models/file.md
+  - docs/models/filechunk.md
+  - docs/models/filepurpose.md
+  - docs/models/filesapiroutesdeletefilerequest.md
+  - docs/models/filesapiroutesdownloadfilerequest.md
+  - docs/models/filesapiroutesgetsignedurlrequest.md
+  - docs/models/filesapirouteslistfilesrequest.md
+  - docs/models/filesapiroutesretrievefilerequest.md
+  - docs/models/filesapiroutesuploadfilemultipartbodyparams.md
+  - docs/models/fileschema.md
+  - docs/models/filesignedurl.md
+  - docs/models/fimcompletionrequest.md
+  - docs/models/fimcompletionrequeststop.md
+  - docs/models/fimcompletionresponse.md
+  - docs/models/fimcompletionstreamrequest.md
+  - docs/models/fimcompletionstreamrequeststop.md
+  - docs/models/finetuneablemodeltype.md
+  - docs/models/finishreason.md
+  - docs/models/format_.md
+  - docs/models/ftclassifierlossfunction.md
+  - docs/models/ftmodelcapabilitiesout.md
+  - docs/models/ftmodelcard.md
+  - docs/models/ftmodelcardtype.md
+  - docs/models/function.md
+  - docs/models/functioncall.md
+  - docs/models/functioncallentry.md
+  - docs/models/functioncallentryarguments.md
+  - docs/models/functioncallentryobject.md
+  - docs/models/functioncallentrytype.md
+  - docs/models/functioncallevent.md
+  - docs/models/functioncalleventtype.md
+  - docs/models/functionname.md
+  - docs/models/functionresultentry.md
+  - docs/models/functionresultentryobject.md
+  - docs/models/functionresultentrytype.md
+  - docs/models/functiontool.md
+  - docs/models/functiontooltype.md
+  - docs/models/githubrepositoryin.md
+  - docs/models/githubrepositoryintype.md
+  - docs/models/githubrepositoryout.md
+  - docs/models/githubrepositoryouttype.md
+  - docs/models/handoffexecution.md
+  - docs/models/httpvalidationerror.md
+  - docs/models/hyperparameters.md
+  - docs/models/imagegenerationtool.md
+  - docs/models/imagegenerationtooltype.md
+  - docs/models/imageurl.md
+  - docs/models/imageurlchunk.md
+  - docs/models/imageurlchunkimageurl.md
+  - docs/models/imageurlchunktype.md
+  - docs/models/inputentries.md
+  - docs/models/inputs.md
+  - 
docs/models/instructrequest.md + - docs/models/instructrequestinputs.md + - docs/models/instructrequestinputsmessages.md + - docs/models/instructrequestmessages.md + - docs/models/integrations.md + - docs/models/jobin.md + - docs/models/jobinintegrations.md + - docs/models/jobinrepositories.md + - docs/models/jobmetadataout.md + - docs/models/jobsapiroutesbatchcancelbatchjobrequest.md + - docs/models/jobsapiroutesbatchgetbatchjobrequest.md + - docs/models/jobsapiroutesbatchgetbatchjobsrequest.md + - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md + - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md + - docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md + - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md + - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md + - docs/models/jobsout.md + - docs/models/jobsoutdata.md + - docs/models/jobsoutobject.md + - docs/models/jobtype.md + - docs/models/jsonschema.md + - docs/models/legacyjobmetadataout.md + - docs/models/legacyjobmetadataoutobject.md + - docs/models/librariesdeletev1request.md + - docs/models/librariesdocumentsdeletev1request.md + - docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md + - docs/models/librariesdocumentsgetsignedurlv1request.md + - docs/models/librariesdocumentsgetstatusv1request.md + - docs/models/librariesdocumentsgettextcontentv1request.md + - docs/models/librariesdocumentsgetv1request.md + - docs/models/librariesdocumentslistv1request.md + - docs/models/librariesdocumentsreprocessv1request.md + - docs/models/librariesdocumentsupdatev1request.md + - docs/models/librariesdocumentsuploadv1documentupload.md + - docs/models/librariesdocumentsuploadv1request.md + - docs/models/librariesgetv1request.md + - docs/models/librariessharecreatev1request.md + - docs/models/librariessharedeletev1request.md + - docs/models/librariessharelistv1request.md + - docs/models/librariesupdatev1request.md + - docs/models/libraryin.md + - docs/models/libraryinupdate.md + - docs/models/libraryout.md + - docs/models/listdocumentout.md + - docs/models/listfilesout.md + - docs/models/listlibraryout.md + - docs/models/listsharingout.md + - docs/models/loc.md + - docs/models/messageentries.md + - docs/models/messageinputcontentchunks.md + - docs/models/messageinputentry.md + - docs/models/messageinputentrycontent.md + - docs/models/messageinputentryrole.md + - docs/models/messageinputentrytype.md + - docs/models/messageoutputcontentchunks.md + - docs/models/messageoutputentry.md + - docs/models/messageoutputentrycontent.md + - docs/models/messageoutputentryobject.md + - docs/models/messageoutputentryrole.md + - docs/models/messageoutputentrytype.md + - docs/models/messageoutputevent.md + - docs/models/messageoutputeventcontent.md + - docs/models/messageoutputeventrole.md + - docs/models/messageoutputeventtype.md + - docs/models/messages.md + - docs/models/metricout.md + - docs/models/mistralpromptmode.md + - docs/models/modelcapabilities.md + - docs/models/modelconversation.md + - 
docs/models/modelconversationobject.md + - docs/models/modelconversationtools.md + - docs/models/modellist.md + - docs/models/modeltype.md + - docs/models/moderationobject.md + - docs/models/moderationresponse.md + - docs/models/name.md + - docs/models/object.md + - docs/models/ocrimageobject.md + - docs/models/ocrpagedimensions.md + - docs/models/ocrpageobject.md + - docs/models/ocrrequest.md + - docs/models/ocrresponse.md + - docs/models/ocrtableobject.md + - docs/models/ocrusageinfo.md + - docs/models/one.md + - docs/models/outputcontentchunks.md + - docs/models/outputs.md + - docs/models/paginationinfo.md + - docs/models/prediction.md + - docs/models/processingstatusout.md + - docs/models/queryparamstatus.md + - docs/models/referencechunk.md + - docs/models/referencechunktype.md + - docs/models/repositories.md + - docs/models/requestsource.md + - docs/models/response1.md + - docs/models/responsebody.md + - docs/models/responsedoneevent.md + - docs/models/responsedoneeventtype.md + - docs/models/responseerrorevent.md + - docs/models/responseerroreventtype.md + - docs/models/responseformat.md + - docs/models/responseformats.md + - docs/models/responsestartedevent.md + - docs/models/responsestartedeventtype.md + - docs/models/retrievefileout.md + - docs/models/retrievemodelv1modelsmodelidgetrequest.md + - docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md + - docs/models/role.md + - docs/models/sampletype.md + - docs/models/security.md + - docs/models/shareenum.md + - docs/models/sharingdelete.md + - docs/models/sharingin.md + - docs/models/sharingout.md + - docs/models/source.md + - docs/models/ssetypes.md + - docs/models/status.md + - docs/models/stop.md + - docs/models/systemmessage.md + - docs/models/systemmessagecontent.md + - docs/models/systemmessagecontentchunks.md + - docs/models/tableformat.md + - docs/models/textchunk.md + - docs/models/textchunktype.md + - docs/models/thinkchunk.md + - docs/models/thinkchunktype.md + - docs/models/thinking.md + - docs/models/timestampgranularity.md + - docs/models/tool.md + - docs/models/toolcall.md + - docs/models/toolchoice.md + - docs/models/toolchoiceenum.md + - docs/models/toolexecutiondeltaevent.md + - docs/models/toolexecutiondeltaeventname.md + - docs/models/toolexecutiondeltaeventtype.md + - docs/models/toolexecutiondoneevent.md + - docs/models/toolexecutiondoneeventname.md + - docs/models/toolexecutiondoneeventtype.md + - docs/models/toolexecutionentry.md + - docs/models/toolexecutionentryobject.md + - docs/models/toolexecutionentrytype.md + - docs/models/toolexecutionstartedevent.md + - docs/models/toolexecutionstartedeventname.md + - docs/models/toolexecutionstartedeventtype.md + - docs/models/toolfilechunk.md + - docs/models/toolfilechunktype.md + - docs/models/toolmessage.md + - docs/models/toolmessagecontent.md + - docs/models/toolmessagerole.md + - docs/models/toolreferencechunk.md + - docs/models/toolreferencechunktype.md + - docs/models/tools.md + - docs/models/tooltypes.md + - docs/models/trainingfile.md + - docs/models/transcriptionresponse.md + - docs/models/transcriptionsegmentchunk.md + - docs/models/transcriptionstreamdone.md + - docs/models/transcriptionstreamdonetype.md + - docs/models/transcriptionstreamevents.md + - docs/models/transcriptionstreameventsdata.md + - docs/models/transcriptionstreameventtypes.md + - docs/models/transcriptionstreamlanguage.md + - docs/models/transcriptionstreamlanguagetype.md + - docs/models/transcriptionstreamsegmentdelta.md + - 
docs/models/transcriptionstreamsegmentdeltatype.md + - docs/models/transcriptionstreamtextdelta.md + - docs/models/transcriptionstreamtextdeltatype.md + - docs/models/two.md + - docs/models/type.md + - docs/models/unarchiveftmodelout.md + - docs/models/unarchiveftmodeloutobject.md + - docs/models/updateftmodelin.md + - docs/models/uploadfileout.md + - docs/models/usageinfo.md + - docs/models/usermessage.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/utils/retryconfig.md + - docs/models/validationerror.md + - docs/models/wandbintegration.md + - docs/models/wandbintegrationout.md + - docs/models/wandbintegrationouttype.md + - docs/models/wandbintegrationtype.md + - docs/models/websearchpremiumtool.md + - docs/models/websearchpremiumtooltype.md + - docs/models/websearchtool.md + - docs/models/websearchtooltype.md + - docs/sdks/accesses/README.md + - docs/sdks/agents/README.md + - docs/sdks/audio/README.md + - docs/sdks/batch/README.md + - docs/sdks/beta/README.md + - docs/sdks/chat/README.md + - docs/sdks/classifiers/README.md + - docs/sdks/conversations/README.md + - docs/sdks/documents/README.md + - docs/sdks/embeddings/README.md + - docs/sdks/files/README.md + - docs/sdks/fim/README.md + - docs/sdks/finetuning/README.md + - docs/sdks/jobs/README.md + - docs/sdks/libraries/README.md + - docs/sdks/mistral/README.md + - docs/sdks/mistralagents/README.md + - docs/sdks/mistraljobs/README.md + - docs/sdks/models/README.md + - docs/sdks/ocr/README.md + - docs/sdks/transcriptions/README.md + - poetry.toml + - py.typed + - scripts/prepare_readme.py + - scripts/publish.sh + - src/mistralai/__init__.py + - src/mistralai/_hooks/__init__.py + - src/mistralai/_hooks/sdkhooks.py + - src/mistralai/_hooks/types.py + - src/mistralai/_version.py + - src/mistralai/accesses.py + - src/mistralai/agents.py + - src/mistralai/audio.py + - src/mistralai/basesdk.py + - src/mistralai/batch.py + - src/mistralai/beta.py + - src/mistralai/chat.py + - src/mistralai/classifiers.py + - src/mistralai/conversations.py + - src/mistralai/documents.py + - src/mistralai/embeddings.py + - src/mistralai/files.py + - src/mistralai/fim.py + - src/mistralai/fine_tuning.py + - src/mistralai/httpclient.py + - src/mistralai/jobs.py + - src/mistralai/libraries.py + - src/mistralai/mistral_agents.py + - src/mistralai/mistral_jobs.py + - src/mistralai/models/__init__.py + - src/mistralai/models/agent.py + - src/mistralai/models/agentconversation.py + - src/mistralai/models/agentcreationrequest.py + - src/mistralai/models/agenthandoffdoneevent.py + - src/mistralai/models/agenthandoffentry.py + - src/mistralai/models/agenthandoffstartedevent.py + - src/mistralai/models/agents_api_v1_agents_deleteop.py + - src/mistralai/models/agents_api_v1_agents_getop.py + - src/mistralai/models/agents_api_v1_agents_listop.py + - src/mistralai/models/agents_api_v1_agents_update_versionop.py + - src/mistralai/models/agents_api_v1_agents_updateop.py + - src/mistralai/models/agents_api_v1_conversations_append_streamop.py + - src/mistralai/models/agents_api_v1_conversations_appendop.py + - src/mistralai/models/agents_api_v1_conversations_deleteop.py + - src/mistralai/models/agents_api_v1_conversations_getop.py + - src/mistralai/models/agents_api_v1_conversations_historyop.py + - src/mistralai/models/agents_api_v1_conversations_listop.py + - src/mistralai/models/agents_api_v1_conversations_messagesop.py + - src/mistralai/models/agents_api_v1_conversations_restart_streamop.py + - 
src/mistralai/models/agents_api_v1_conversations_restartop.py + - src/mistralai/models/agentscompletionrequest.py + - src/mistralai/models/agentscompletionstreamrequest.py + - src/mistralai/models/agentupdaterequest.py + - src/mistralai/models/apiendpoint.py + - src/mistralai/models/archiveftmodelout.py + - src/mistralai/models/assistantmessage.py + - src/mistralai/models/audiochunk.py + - src/mistralai/models/audiotranscriptionrequest.py + - src/mistralai/models/audiotranscriptionrequeststream.py + - src/mistralai/models/basemodelcard.py + - src/mistralai/models/batcherror.py + - src/mistralai/models/batchjobin.py + - src/mistralai/models/batchjobout.py + - src/mistralai/models/batchjobsout.py + - src/mistralai/models/batchjobstatus.py + - src/mistralai/models/builtinconnectors.py + - src/mistralai/models/chatclassificationrequest.py + - src/mistralai/models/chatcompletionchoice.py + - src/mistralai/models/chatcompletionrequest.py + - src/mistralai/models/chatcompletionresponse.py + - src/mistralai/models/chatcompletionstreamrequest.py + - src/mistralai/models/chatmoderationrequest.py + - src/mistralai/models/checkpointout.py + - src/mistralai/models/classificationrequest.py + - src/mistralai/models/classificationresponse.py + - src/mistralai/models/classificationtargetresult.py + - src/mistralai/models/classifierdetailedjobout.py + - src/mistralai/models/classifierftmodelout.py + - src/mistralai/models/classifierjobout.py + - src/mistralai/models/classifiertargetin.py + - src/mistralai/models/classifiertargetout.py + - src/mistralai/models/classifiertrainingparameters.py + - src/mistralai/models/classifiertrainingparametersin.py + - src/mistralai/models/codeinterpretertool.py + - src/mistralai/models/completionargs.py + - src/mistralai/models/completionargsstop.py + - src/mistralai/models/completionchunk.py + - src/mistralai/models/completiondetailedjobout.py + - src/mistralai/models/completionevent.py + - src/mistralai/models/completionftmodelout.py + - src/mistralai/models/completionjobout.py + - src/mistralai/models/completionresponsestreamchoice.py + - src/mistralai/models/completiontrainingparameters.py + - src/mistralai/models/completiontrainingparametersin.py + - src/mistralai/models/contentchunk.py + - src/mistralai/models/conversationappendrequest.py + - src/mistralai/models/conversationappendstreamrequest.py + - src/mistralai/models/conversationevents.py + - src/mistralai/models/conversationhistory.py + - src/mistralai/models/conversationinputs.py + - src/mistralai/models/conversationmessages.py + - src/mistralai/models/conversationrequest.py + - src/mistralai/models/conversationresponse.py + - src/mistralai/models/conversationrestartrequest.py + - src/mistralai/models/conversationrestartstreamrequest.py + - src/mistralai/models/conversationstreamrequest.py + - src/mistralai/models/conversationusageinfo.py + - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py + - src/mistralai/models/deletefileout.py + - src/mistralai/models/deletemodelout.py + - src/mistralai/models/deltamessage.py + - src/mistralai/models/documentlibrarytool.py + - src/mistralai/models/documentout.py + - src/mistralai/models/documenttextcontent.py + - src/mistralai/models/documentupdatein.py + - src/mistralai/models/documenturlchunk.py + - src/mistralai/models/embeddingdtype.py + - src/mistralai/models/embeddingrequest.py + - src/mistralai/models/embeddingresponse.py + - src/mistralai/models/embeddingresponsedata.py + - src/mistralai/models/encodingformat.py + - 
src/mistralai/models/entitytype.py + - src/mistralai/models/eventout.py + - src/mistralai/models/file.py + - src/mistralai/models/filechunk.py + - src/mistralai/models/filepurpose.py + - src/mistralai/models/files_api_routes_delete_fileop.py + - src/mistralai/models/files_api_routes_download_fileop.py + - src/mistralai/models/files_api_routes_get_signed_urlop.py + - src/mistralai/models/files_api_routes_list_filesop.py + - src/mistralai/models/files_api_routes_retrieve_fileop.py + - src/mistralai/models/files_api_routes_upload_fileop.py + - src/mistralai/models/fileschema.py + - src/mistralai/models/filesignedurl.py + - src/mistralai/models/fimcompletionrequest.py + - src/mistralai/models/fimcompletionresponse.py + - src/mistralai/models/fimcompletionstreamrequest.py + - src/mistralai/models/finetuneablemodeltype.py + - src/mistralai/models/ftclassifierlossfunction.py + - src/mistralai/models/ftmodelcapabilitiesout.py + - src/mistralai/models/ftmodelcard.py + - src/mistralai/models/function.py + - src/mistralai/models/functioncall.py + - src/mistralai/models/functioncallentry.py + - src/mistralai/models/functioncallentryarguments.py + - src/mistralai/models/functioncallevent.py + - src/mistralai/models/functionname.py + - src/mistralai/models/functionresultentry.py + - src/mistralai/models/functiontool.py + - src/mistralai/models/githubrepositoryin.py + - src/mistralai/models/githubrepositoryout.py + - src/mistralai/models/httpvalidationerror.py + - src/mistralai/models/imagegenerationtool.py + - src/mistralai/models/imageurl.py + - src/mistralai/models/imageurlchunk.py + - src/mistralai/models/inputentries.py + - src/mistralai/models/inputs.py + - src/mistralai/models/instructrequest.py + - src/mistralai/models/jobin.py + - src/mistralai/models/jobmetadataout.py + - src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py + - src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py + - src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py + - src/mistralai/models/jobsout.py + - src/mistralai/models/jsonschema.py + - src/mistralai/models/legacyjobmetadataout.py + - src/mistralai/models/libraries_delete_v1op.py + - src/mistralai/models/libraries_documents_delete_v1op.py + - src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py + - src/mistralai/models/libraries_documents_get_signed_url_v1op.py + - src/mistralai/models/libraries_documents_get_status_v1op.py + - src/mistralai/models/libraries_documents_get_text_content_v1op.py + - src/mistralai/models/libraries_documents_get_v1op.py + - src/mistralai/models/libraries_documents_list_v1op.py + - src/mistralai/models/libraries_documents_reprocess_v1op.py + - src/mistralai/models/libraries_documents_update_v1op.py + - src/mistralai/models/libraries_documents_upload_v1op.py + - src/mistralai/models/libraries_get_v1op.py + - 
src/mistralai/models/libraries_share_create_v1op.py + - src/mistralai/models/libraries_share_delete_v1op.py + - src/mistralai/models/libraries_share_list_v1op.py + - src/mistralai/models/libraries_update_v1op.py + - src/mistralai/models/libraryin.py + - src/mistralai/models/libraryinupdate.py + - src/mistralai/models/libraryout.py + - src/mistralai/models/listdocumentout.py + - src/mistralai/models/listfilesout.py + - src/mistralai/models/listlibraryout.py + - src/mistralai/models/listsharingout.py + - src/mistralai/models/messageentries.py + - src/mistralai/models/messageinputcontentchunks.py + - src/mistralai/models/messageinputentry.py + - src/mistralai/models/messageoutputcontentchunks.py + - src/mistralai/models/messageoutputentry.py + - src/mistralai/models/messageoutputevent.py + - src/mistralai/models/metricout.py + - src/mistralai/models/mistralerror.py + - src/mistralai/models/mistralpromptmode.py + - src/mistralai/models/modelcapabilities.py + - src/mistralai/models/modelconversation.py + - src/mistralai/models/modellist.py + - src/mistralai/models/moderationobject.py + - src/mistralai/models/moderationresponse.py + - src/mistralai/models/no_response_error.py + - src/mistralai/models/ocrimageobject.py + - src/mistralai/models/ocrpagedimensions.py + - src/mistralai/models/ocrpageobject.py + - src/mistralai/models/ocrrequest.py + - src/mistralai/models/ocrresponse.py + - src/mistralai/models/ocrtableobject.py + - src/mistralai/models/ocrusageinfo.py + - src/mistralai/models/outputcontentchunks.py + - src/mistralai/models/paginationinfo.py + - src/mistralai/models/prediction.py + - src/mistralai/models/processingstatusout.py + - src/mistralai/models/referencechunk.py + - src/mistralai/models/requestsource.py + - src/mistralai/models/responsedoneevent.py + - src/mistralai/models/responseerrorevent.py + - src/mistralai/models/responseformat.py + - src/mistralai/models/responseformats.py + - src/mistralai/models/responsestartedevent.py + - src/mistralai/models/responsevalidationerror.py + - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py + - src/mistralai/models/retrievefileout.py + - src/mistralai/models/sampletype.py + - src/mistralai/models/sdkerror.py + - src/mistralai/models/security.py + - src/mistralai/models/shareenum.py + - src/mistralai/models/sharingdelete.py + - src/mistralai/models/sharingin.py + - src/mistralai/models/sharingout.py + - src/mistralai/models/source.py + - src/mistralai/models/ssetypes.py + - src/mistralai/models/systemmessage.py + - src/mistralai/models/systemmessagecontentchunks.py + - src/mistralai/models/textchunk.py + - src/mistralai/models/thinkchunk.py + - src/mistralai/models/timestampgranularity.py + - src/mistralai/models/tool.py + - src/mistralai/models/toolcall.py + - src/mistralai/models/toolchoice.py + - src/mistralai/models/toolchoiceenum.py + - src/mistralai/models/toolexecutiondeltaevent.py + - src/mistralai/models/toolexecutiondoneevent.py + - src/mistralai/models/toolexecutionentry.py + - src/mistralai/models/toolexecutionstartedevent.py + - src/mistralai/models/toolfilechunk.py + - src/mistralai/models/toolmessage.py + - src/mistralai/models/toolreferencechunk.py + - src/mistralai/models/tooltypes.py + - src/mistralai/models/trainingfile.py + - src/mistralai/models/transcriptionresponse.py + - src/mistralai/models/transcriptionsegmentchunk.py + - src/mistralai/models/transcriptionstreamdone.py + - src/mistralai/models/transcriptionstreamevents.py + - src/mistralai/models/transcriptionstreameventtypes.py + - 
src/mistralai/models/transcriptionstreamlanguage.py + - src/mistralai/models/transcriptionstreamsegmentdelta.py + - src/mistralai/models/transcriptionstreamtextdelta.py + - src/mistralai/models/unarchiveftmodelout.py + - src/mistralai/models/updateftmodelin.py + - src/mistralai/models/uploadfileout.py + - src/mistralai/models/usageinfo.py + - src/mistralai/models/usermessage.py + - src/mistralai/models/validationerror.py + - src/mistralai/models/wandbintegration.py + - src/mistralai/models/wandbintegrationout.py + - src/mistralai/models/websearchpremiumtool.py + - src/mistralai/models/websearchtool.py + - src/mistralai/models_.py + - src/mistralai/ocr.py + - src/mistralai/py.typed + - src/mistralai/sdk.py + - src/mistralai/sdkconfiguration.py + - src/mistralai/transcriptions.py + - src/mistralai/types/__init__.py + - src/mistralai/types/basemodel.py + - src/mistralai/utils/__init__.py + - src/mistralai/utils/annotations.py + - src/mistralai/utils/datetimes.py + - src/mistralai/utils/enums.py + - src/mistralai/utils/eventstreaming.py + - src/mistralai/utils/forms.py + - src/mistralai/utils/headers.py + - src/mistralai/utils/logger.py + - src/mistralai/utils/metadata.py + - src/mistralai/utils/queryparams.py + - src/mistralai/utils/requestbodies.py + - src/mistralai/utils/retries.py + - src/mistralai/utils/security.py + - src/mistralai/utils/serializers.py + - src/mistralai/utils/unmarshal_json_response.py + - src/mistralai/utils/url.py + - src/mistralai/utils/values.py diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 85ac8dac..53216a9e 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -16,12 +16,17 @@ generation: auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false + hoistGlobalSecurity: true + schemas: + allOfMergeStrategy: shallowMerge + requestBodyFieldName: "" + persistentEdits: {} tests: generateTests: true generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.10.0 + version: 1.11.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -29,10 +34,12 @@ python: allowedRedefinedBuiltins: - id - object + asyncMode: both authors: - Mistral baseErrorName: MistralError clientServerStatusCodesAsErrors: true + constFieldCasing: upper defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API. 
enableCustomCodeRegions: true @@ -51,14 +58,19 @@ python: operations: "" shared: "" webhooks: "" + inferUnionDiscriminators: true inputModelSuffix: input + license: "" maxMethodParams: 15 methodArguments: infer-optional-args moduleName: "" + multipartArrayFormat: legacy outputModelSuffix: output packageManager: uv packageName: mistralai + preApplyUnionDiscriminators: false pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat + sseFlatResponse: false templateVersion: v2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 48c4bf7b..da32528e 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,15 +1,15 @@ -speakeasyVersion: 1.606.10 +speakeasyVersion: 1.681.0 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:670c460702ec74f7077491464a6dc5ee9d873969c80e812c48dbf4deb160e470 - sourceBlobDigest: sha256:5a3ebfa4cb00a015bb7bb03ec7442fc7e0b9c17ca66ab35d3045290b2ad87eac + sourceRevisionDigest: sha256:fcc7262f29ef89a07cb718d7e6094c272627cf9f531588aef15a6e92dd50130a + sourceBlobDigest: sha256:9e6fc34474062726ceb96e424e858a0ae1b0506659cd11a58c72e1dd50dae885 tags: - latest mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 - sourceBlobDigest: sha256:3327f078a11596abdcbc21cd8a1adcf0b2aa474975cd9ab1feb745a2e50d555f + sourceRevisionDigest: sha256:bc59aaf55dc46e94ddf6cc687292807629d7a17ee5f573a504e7e44fd365e147 + sourceBlobDigest: sha256:545fe85c5dae11def2741fc7a99f297b7f0728c9677c3c7b94d56ddbed70581d tags: - latest mistral-openapi: @@ -18,32 +18,31 @@ sources: sourceBlobDigest: sha256:f0caa06fb9bcadc35b097aa5ff69bb5020937652df311722b5e44a282bd95d6d tags: - latest - - speakeasy-sdk-regen-1765914268 targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:670c460702ec74f7077491464a6dc5ee9d873969c80e812c48dbf4deb160e470 - sourceBlobDigest: sha256:5a3ebfa4cb00a015bb7bb03ec7442fc7e0b9c17ca66ab35d3045290b2ad87eac + sourceRevisionDigest: sha256:fcc7262f29ef89a07cb718d7e6094c272627cf9f531588aef15a6e92dd50130a + sourceBlobDigest: sha256:9e6fc34474062726ceb96e424e858a0ae1b0506659cd11a58c72e1dd50dae885 codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:e6802c97fd9783aa91cc0853de1a889944f699b88e0dafcf9fecd83de6e2c6c9 + codeSamplesRevisionDigest: sha256:c01681b7fafd643cd608f569863a4f2be4e46a4a156d3ee877d36d90f91078e9 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 - sourceBlobDigest: sha256:3327f078a11596abdcbc21cd8a1adcf0b2aa474975cd9ab1feb745a2e50d555f + sourceRevisionDigest: sha256:bc59aaf55dc46e94ddf6cc687292807629d7a17ee5f573a504e7e44fd365e147 + sourceBlobDigest: sha256:545fe85c5dae11def2741fc7a99f297b7f0728c9677c3c7b94d56ddbed70581d codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:03b3e82c20d10faa8622f14696632b96b1a2e8d747b266fff345061298d5f3e4 + codeSamplesRevisionDigest: sha256:09ffc080d6c15263774be0f3ff05ca02928bbc28224cf4e720254aa91852c9e0 mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi sourceRevisionDigest: sha256:cb63bd997cefe7b3b36e91a475df57cb779bf79f183340e0713d8ffb16a2dabc sourceBlobDigest: 
sha256:f0caa06fb9bcadc35b097aa5ff69bb5020937652df311722b5e44a282bd95d6d codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:b1eacff97275a14ab0c2143e07bdfa4f4bd58f5370b2f106bcc6ada92b754d08 + codeSamplesRevisionDigest: sha256:787c3614f1d0b8c10d93a11289117cb8dfd95da61ebfeb3754bc23fe8615f4f0 workflow: workflowVersion: 1.0.0 - speakeasyVersion: 1.606.10 + speakeasyVersion: 1.681.0 sources: mistral-azure-source: inputs: diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index 3156d149..32e1d624 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,5 +1,5 @@ workflowVersion: 1.0.0 -speakeasyVersion: 1.606.10 +speakeasyVersion: 1.681.0 sources: mistral-azure-source: inputs: diff --git a/README.md b/README.md index ba054118..d755d249 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,15 @@ Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create yo > > Once a Python version reaches its [official end of life date](https://devguide.python.org/versions/), a 3-month grace period is provided for users to upgrade. Following this grace period, the minimum python version supported in the SDK will be updated. -The SDK can be installed with either *pip* or *uv* package managers. +The SDK can be installed with *uv*, *pip*, or *poetry* package managers. + +### uv + +*uv* is a fast Python package installer and resolver, designed as a drop-in replacement for pip and pip-tools. It's recommended for its speed and modern Python tooling capabilities. + +```bash +uv add mistralai +``` ### PIP @@ -68,12 +76,12 @@ The SDK can be installed with either *pip* or *uv* package managers. pip install mistralai ``` -### UV +### Poetry -*UV* is an extremely fast Python package and project manager. You can use it to add the SDK to your project: +*Poetry* is a modern tool that simplifies dependency management and package publishing by using a single `pyproject.toml` file to handle project metadata and dependencies. ```bash -uv add mistralai +poetry add mistralai ``` ### Shell and script usage with `uv` @@ -89,7 +97,7 @@ It's also possible to write a standalone Python script without needing to set up ```python #!/usr/bin/env -S uv run --script # /// script -# requires-python = ">=3.10" +# requires-python = ">=3.9" # dependencies = [ # "mistralai", # ] @@ -152,6 +160,7 @@ with Mistral(
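For orientation between hunks: the standalone-script hunk above only bumps `requires-python` from ">=3.10" to ">=3.9" inside the inline script metadata block. A minimal sketch of the full pattern that block belongs to follows; the chat call, model name, and env var lookup are assumptions modeled on the SDK's usage examples, not part of the diff:

```python
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.9"
# dependencies = [
#     "mistralai",
# ]
# ///
# Running this file with `uv run script.py` first resolves and installs the
# declared dependencies into an ephemeral environment, then executes it.
import os

from mistralai import Mistral

with Mistral(api_key=os.getenv("MISTRAL_API_KEY", "")) as mistral:
    res = mistral.chat.complete(
        model="mistral-small-latest",  # illustrative model name
        messages=[{"role": "user", "content": "Say hello."}],
    )
    print(res.choices[0].message.content)
```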
The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio @@ -205,6 +214,7 @@ with Mistral(
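The hunks in this region each add a blank line before the same "Asynchronous Example" snippet, which the three-line diff context truncates right after its opening. As a reference, a minimal sketch of such an async call with this SDK; everything past the imports is an assumption modeled on the synchronous example (the model name and response access are illustrative, not taken from the diff):

```python
# Asynchronous Example (sketch, not the verbatim README snippet)
import asyncio
import os

from mistralai import Mistral


async def main():
    # The client doubles as an async context manager; *_async method
    # variants mirror the synchronous ones.
    async with Mistral(api_key=os.getenv("MISTRAL_API_KEY", "")) as mistral:
        res = await mistral.chat.complete_async(
            model="mistral-small-latest",  # illustrative model name
            messages=[{"role": "user", "content": "Say hello."}],
        )
        print(res.choices[0].message.content)


asyncio.run(main())
```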
The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio @@ -258,6 +268,7 @@ with Mistral(
The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio @@ -311,6 +322,7 @@ with Mistral(
The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio @@ -436,33 +448,24 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA
Available methods -### [agents](docs/sdks/agents/README.md) +### [Agents](docs/sdks/agents/README.md) * [complete](docs/sdks/agents/README.md#complete) - Agents Completion * [stream](docs/sdks/agents/README.md#stream) - Stream Agents completion -### [audio](docs/sdks/audio/README.md) - - -#### [audio.transcriptions](docs/sdks/transcriptions/README.md) +### [Audio.Transcriptions](docs/sdks/transcriptions/README.md) * [complete](docs/sdks/transcriptions/README.md#complete) - Create Transcription * [stream](docs/sdks/transcriptions/README.md#stream) - Create Streaming Transcription (SSE) -### [batch](docs/sdks/batch/README.md) - - -#### [batch.jobs](docs/sdks/mistraljobs/README.md) +### [Batch.Jobs](docs/sdks/mistraljobs/README.md) * [list](docs/sdks/mistraljobs/README.md#list) - Get Batch Jobs * [create](docs/sdks/mistraljobs/README.md#create) - Create Batch Job * [get](docs/sdks/mistraljobs/README.md#get) - Get Batch Job * [cancel](docs/sdks/mistraljobs/README.md#cancel) - Cancel Batch Job -### [beta](docs/sdks/beta/README.md) - - -#### [beta.agents](docs/sdks/mistralagents/README.md) +### [Beta.Agents](docs/sdks/mistralagents/README.md) * [create](docs/sdks/mistralagents/README.md#create) - Create an agent that can be used within a conversation. * [list](docs/sdks/mistralagents/README.md#list) - List agent entities. @@ -471,7 +474,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [delete](docs/sdks/mistralagents/README.md#delete) - Delete an agent entity. * [update_version](docs/sdks/mistralagents/README.md#update_version) - Update an agent version. -#### [beta.conversations](docs/sdks/conversations/README.md) +### [Beta.Conversations](docs/sdks/conversations/README.md) * [start](docs/sdks/conversations/README.md#start) - Create a conversation and append entries to it. * [list](docs/sdks/conversations/README.md#list) - List all created conversations. @@ -485,7 +488,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [append_stream](docs/sdks/conversations/README.md#append_stream) - Append new entries to an existing conversation. * [restart_stream](docs/sdks/conversations/README.md#restart_stream) - Restart a conversation starting from a given entry. -#### [beta.libraries](docs/sdks/libraries/README.md) +### [Beta.Libraries](docs/sdks/libraries/README.md) * [list](docs/sdks/libraries/README.md#list) - List all libraries you have access to. * [create](docs/sdks/libraries/README.md#create) - Create a new Library. @@ -493,13 +496,13 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [delete](docs/sdks/libraries/README.md#delete) - Delete a library and all of its documents. * [update](docs/sdks/libraries/README.md#update) - Update a library. -#### [beta.libraries.accesses](docs/sdks/accesses/README.md) +#### [Beta.Libraries.Accesses](docs/sdks/accesses/README.md) * [list](docs/sdks/accesses/README.md#list) - List all of the accesses to this library. * [update_or_create](docs/sdks/accesses/README.md#update_or_create) - Create or update an access level. * [delete](docs/sdks/accesses/README.md#delete) - Delete an access level. -#### [beta.libraries.documents](docs/sdks/documents/README.md) +#### [Beta.Libraries.Documents](docs/sdks/documents/README.md) * [list](docs/sdks/documents/README.md#list) - List documents in a given library. * [upload](docs/sdks/documents/README.md#upload) - Upload a new document.
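The heading renames in this section are documentation-only; in code the namespaces stay lowercase attribute chains on the client. A short sketch, assuming the accessor paths mirror the doc paths listed above (the `library_id` parameter name and value are hypothetical):

```python
import os

from mistralai import Mistral

with Mistral(api_key=os.getenv("MISTRAL_API_KEY", "")) as mistral:
    # "Beta.Libraries" heading -> mistral.beta.libraries in code
    libraries = mistral.beta.libraries.list()
    # "Beta.Libraries.Documents" heading -> mistral.beta.libraries.documents
    docs = mistral.beta.libraries.documents.list(library_id="lib_123")  # hypothetical id
```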
@@ -512,23 +515,23 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [extracted_text_signed_url](docs/sdks/documents/README.md#extracted_text_signed_url) - Retrieve the signed URL of text extracted from a given document. * [reprocess](docs/sdks/documents/README.md#reprocess) - Reprocess a document. -### [chat](docs/sdks/chat/README.md) +### [Chat](docs/sdks/chat/README.md) * [complete](docs/sdks/chat/README.md#complete) - Chat Completion * [stream](docs/sdks/chat/README.md#stream) - Stream chat completion -### [classifiers](docs/sdks/classifiers/README.md) +### [Classifiers](docs/sdks/classifiers/README.md) * [moderate](docs/sdks/classifiers/README.md#moderate) - Moderations * [moderate_chat](docs/sdks/classifiers/README.md#moderate_chat) - Chat Moderations * [classify](docs/sdks/classifiers/README.md#classify) - Classifications * [classify_chat](docs/sdks/classifiers/README.md#classify_chat) - Chat Classifications -### [embeddings](docs/sdks/embeddings/README.md) +### [Embeddings](docs/sdks/embeddings/README.md) * [create](docs/sdks/embeddings/README.md#create) - Embeddings -### [files](docs/sdks/files/README.md) +### [Files](docs/sdks/files/README.md) * [upload](docs/sdks/files/README.md#upload) - Upload File * [list](docs/sdks/files/README.md#list) - List Files @@ -537,15 +540,12 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [download](docs/sdks/files/README.md#download) - Download File * [get_signed_url](docs/sdks/files/README.md#get_signed_url) - Get Signed Url -### [fim](docs/sdks/fim/README.md) +### [Fim](docs/sdks/fim/README.md) * [complete](docs/sdks/fim/README.md#complete) - Fim Completion * [stream](docs/sdks/fim/README.md#stream) - Stream fim completion -### [fine_tuning](docs/sdks/finetuning/README.md) - - -#### [fine_tuning.jobs](docs/sdks/jobs/README.md) +### [FineTuning.Jobs](docs/sdks/jobs/README.md) * [list](docs/sdks/jobs/README.md#list) - Get Fine Tuning Jobs * [create](docs/sdks/jobs/README.md#create) - Create Fine Tuning Job @@ -553,8 +553,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [cancel](docs/sdks/jobs/README.md#cancel) - Cancel Fine Tuning Job * [start](docs/sdks/jobs/README.md#start) - Start Fine Tuning Job - -### [models](docs/sdks/models/README.md) +### [Models](docs/sdks/models/README.md) * [list](docs/sdks/models/README.md#list) - List Models * [retrieve](docs/sdks/models/README.md#retrieve) - Retrieve Model @@ -563,7 +562,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [archive](docs/sdks/models/README.md#archive) - Archive Fine Tuned Model * [unarchive](docs/sdks/models/README.md#unarchive) - Unarchive Fine Tuned Model -### [ocr](docs/sdks/ocr/README.md) +### [Ocr](docs/sdks/ocr/README.md) * [process](docs/sdks/ocr/README.md#process) - OCR diff --git a/USAGE.md b/USAGE.md index b230b016..a31d502f 100644 --- a/USAGE.md +++ b/USAGE.md @@ -29,6 +29,7 @@ with Mistral(
The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio @@ -82,6 +83,7 @@ with Mistral(
The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio @@ -135,6 +137,7 @@ with Mistral(
The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio @@ -188,6 +191,7 @@ with Mistral(
The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md index af768506..040bc24c 100644 --- a/docs/sdks/accesses/README.md +++ b/docs/sdks/accesses/README.md @@ -1,5 +1,4 @@ -# Accesses -(*beta.libraries.accesses*) +# Beta.Libraries.Accesses ## Overview diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 87a411cd..173925ee 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -1,5 +1,4 @@ # Agents -(*agents*) ## Overview diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 213ab710..5bb24baa 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -1,5 +1,4 @@ # Chat -(*chat*) ## Overview diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 75b8c333..57a3f805 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -1,5 +1,4 @@ # Classifiers -(*classifiers*) ## Overview diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index 1e2d560e..c488848c 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -1,5 +1,4 @@ -# Conversations -(*beta.conversations*) +# Beta.Conversations ## Overview diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md index c1551925..d3f5a975 100644 --- a/docs/sdks/documents/README.md +++ b/docs/sdks/documents/README.md @@ -1,5 +1,4 @@ -# Documents -(*beta.libraries.documents*) +# Beta.Libraries.Documents ## Overview diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index b03ea9cd..6a2768a2 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -1,5 +1,4 @@ # Embeddings -(*embeddings*) ## Overview diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index 0a68c1f5..f0dfd593 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -1,5 +1,4 @@ # Files -(*files*) ## Overview diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index d282a810..db6f2e1b 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -1,5 +1,4 @@ # Fim -(*fim*) ## Overview diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index b06170f8..666224a7 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -1,5 +1,4 @@ -# Jobs -(*fine_tuning.jobs*) +# FineTuning.Jobs ## Overview diff --git a/docs/sdks/libraries/README.md b/docs/sdks/libraries/README.md index 14d39f97..e672c190 100644 --- a/docs/sdks/libraries/README.md +++ b/docs/sdks/libraries/README.md @@ -1,5 +1,4 @@ -# Libraries -(*beta.libraries*) +# Beta.Libraries ## Overview diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/mistralagents/README.md index 767ba56d..97819467 100644 --- a/docs/sdks/mistralagents/README.md +++ b/docs/sdks/mistralagents/README.md @@ -1,5 +1,4 @@ -# MistralAgents -(*beta.agents*) +# Beta.Agents ## Overview diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 469a2029..cc23c1b9 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -1,5 +1,4 @@ -# MistralJobs -(*batch.jobs*) +# Batch.Jobs ## Overview diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 94491520..d51866b6 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -1,5 +1,4 @@ # Models -(*models*) ## Overview diff --git 
a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index 9264d104..6fd904cc 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -1,5 +1,4 @@ # Ocr -(*ocr*) ## Overview diff --git a/docs/sdks/transcriptions/README.md b/docs/sdks/transcriptions/README.md index 52b7884e..3243258c 100644 --- a/docs/sdks/transcriptions/README.md +++ b/docs/sdks/transcriptions/README.md @@ -1,5 +1,4 @@ -# Transcriptions -(*audio.transcriptions*) +# Audio.Transcriptions ## Overview diff --git a/examples/gcp/async_chat_no_streaming.py b/examples/gcp/async_chat_no_streaming.py index 178f151c..46c33d82 100755 --- a/examples/gcp/async_chat_no_streaming.py +++ b/examples/gcp/async_chat_no_streaming.py @@ -8,7 +8,7 @@ async def main(): - model = "mistral-large-2407" + model = "mistral-small-2503" client = MistralGoogleCloud(project_id=os.environ["GCP_PROJECT_ID"]) diff --git a/packages/mistralai_azure/.gitignore b/packages/mistralai_azure/.gitignore index f2ea8c39..b386de74 100644 --- a/packages/mistralai_azure/.gitignore +++ b/packages/mistralai_azure/.gitignore @@ -1,3 +1,5 @@ +.env +.env.local **/__pycache__/ **/.speakeasy/temp/ **/.speakeasy/logs/ diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index bce8e3c8..4fe641c6 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,39 +1,706 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 87653f040f5b36c90e066870f34c478e + docChecksum: 3cd8710baef46375e8114574e63628e2 docVersion: 1.0.0 - speakeasyVersion: 1.568.2 - generationVersion: 2.634.2 - releaseVersion: 1.6.0 - configChecksum: 0b604304465a25f89acca310710262d1 + speakeasyVersion: 1.681.0 + generationVersion: 2.789.5 + releaseVersion: 1.7.9 + configChecksum: 67e863406400894b2d103f2d691d1e49 published: true +persistentEdits: + generation_id: c2418757-815c-4255-8e8b-1136e64f92a6 + pristine_commit_hash: a1b8f572ddf9edad78c7dc1230ea53b493ac7473 + pristine_tree_hash: 13b71c15fd0b885b52e54cb18742cb48c0524686 features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 constsAndDefaults: 1.0.5 - core: 5.19.3 + core: 5.23.15 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.1 + examples: 3.0.2 flatRequests: 1.0.1 - globalSecurity: 3.0.3 + globalSecurity: 3.0.4 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.1.1 + globalServerURLs: 3.2.0 methodArguments: 1.0.2 nameOverrides: 3.0.1 nullables: 1.0.1 openEnums: 1.0.1 responseFormat: 1.0.1 - retries: 3.0.2 - sdkHooks: 1.1.0 - serverEvents: 1.0.7 + retries: 3.0.3 + sdkHooks: 1.2.0 + serverEvents: 1.0.11 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.0.4 + unions: 3.1.1 +trackedFiles: + .gitattributes: + id: 24139dae6567 + last_write_checksum: sha1:53134de3ada576f37c22276901e1b5b6d85cd2da + pristine_git_object: 4d75d59008e4d8609876d263419a9dc56c8d6f3a + .vscode/settings.json: + id: 89aa447020cd + last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 + pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + docs/models/arguments.md: + id: 7ea5e33709a7 + last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec + pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79 + docs/models/assistantmessage.md: + id: 7e0218023943 + last_write_checksum: sha1:e75d407349842b2de46ee3ca6250f9f51121cf38 + pristine_git_object: 3d0bd90b4433c1a919f917f4bcf2518927cdcd50 + 
docs/models/assistantmessagecontent.md: + id: 9f1795bbe642 + last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643 + pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d + docs/models/assistantmessagerole.md: + id: bb5d2a4bc72f + last_write_checksum: sha1:82f2c4f469426bd476c1003a91394afb89cb7c91 + pristine_git_object: 658229e77eb6419391cf7941568164541c528387 + docs/models/chatcompletionchoice.md: + id: 0d15c59ab501 + last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 + pristine_git_object: deaa0ea073e1b6c21bd466c10db31db2464066f1 + docs/models/chatcompletionchoicefinishreason.md: + id: 225764da91d3 + last_write_checksum: sha1:b894d3408cb801e072c3c302a5676ff939d59284 + pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b + docs/models/chatcompletionrequest.md: + id: adffe90369d0 + last_write_checksum: sha1:7cf0b10e3322059649e32f581b24d8bad684a7eb + pristine_git_object: be296b4b0a9368511b051d7faafc3ebcfdf0b45f + docs/models/chatcompletionrequestmessages.md: + id: ec996b350e12 + last_write_checksum: sha1:2ecec8d12cdb48426f4eb62732066fc79fcd4ec3 + pristine_git_object: bc7708a67f06d74e8a5bf1facb2b23fb1e08053c + docs/models/chatcompletionrequeststop.md: + id: fcaf5bbea451 + last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851 + pristine_git_object: 749296d420c0671d2a1d6d22483b51f577a86485 + docs/models/chatcompletionrequesttoolchoice.md: + id: b97041b2f15b + last_write_checksum: sha1:7ad7eb133f70e07d0d6a9def36aadd08b35cf861 + pristine_git_object: dc82a8ef91e7bfd44f1d2d9d9a4ef61b6e76cc34 + docs/models/chatcompletionresponse.md: + id: 7c53b24681b9 + last_write_checksum: sha1:a56581c0846638cfe6df26d3045fb4f874ccd931 + pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b + docs/models/chatcompletionstreamrequest.md: + id: cf8f29558a68 + last_write_checksum: sha1:8b1a0f34d667aaed3209e2d9ff64f2c92629db30 + pristine_git_object: 03ad3291bbd750cfca0bb62fb556adfea748b7bc + docs/models/chatcompletionstreamrequesttoolchoice.md: + id: 210d5e5b1413 + last_write_checksum: sha1:0543164caf3f4fb2bef3061dbd1a5e6b34b17ae9 + pristine_git_object: 43f3ca3809bf1a2a040e2ad7c19a2b22db0b73f8 + docs/models/completionchunk.md: + id: 60cb30423c60 + last_write_checksum: sha1:61b976fe2e71236cf7941ee1635decc31bd304b2 + pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35 + docs/models/completionevent.md: + id: e57cd17cb9dc + last_write_checksum: sha1:4f59c67af0b11c77b80d2b9c7aca36484d2be219 + pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d + docs/models/completionresponsestreamchoice.md: + id: d56824d615a6 + last_write_checksum: sha1:dcf4b125b533192cb5aea1a68551866954712dc5 + pristine_git_object: c807dacd98eb3561ee45f40db71a92cb72b0f6de + docs/models/content.md: + id: bfd859c99f86 + last_write_checksum: sha1:6673dbd19871a701955a322348a4f7e51c38ffc8 + pristine_git_object: a833dc2c6043e36b85131c9243b4cc02b9fcc4c6 + docs/models/contentchunk.md: + id: d2d3a32080cd + last_write_checksum: sha1:5839a26cdc412b78caad7fb59df97bdcea57be6d + pristine_git_object: 22023e8b19692df969693b7a14f8cf6e0143859f + docs/models/deltamessage.md: + id: 6c5ed6b60968 + last_write_checksum: sha1:c213149256c620715d744c89685d5b6cbdea6f58 + pristine_git_object: 61deabbf7e37388fdd4c1789089d120cc0b937b9 + docs/models/document.md: + id: cd1d2a444370 + last_write_checksum: sha1:d00a2ac808a0ae83a7b97da87e647ecc8dca9c52 + pristine_git_object: 509d43b733d68d462853d9eb52fc913c855dff40 + docs/models/documenturlchunk.md: + id: 48437d297408 + last_write_checksum: 
sha1:38c3e2ad5353a4632bd827f00419c5d8eb2def54 + pristine_git_object: 6c9a5b4d9e6769be242b27ef0208f6af704689c0 + docs/models/documenturlchunktype.md: + id: a3574c91f539 + last_write_checksum: sha1:a0134fc0ea822d55b1204ee71140f2aa9d8dbe9c + pristine_git_object: 32e1fa9e975a3633fb49057b38b0ea0206b2d8ef + docs/models/filechunk.md: + id: edc076728e9d + last_write_checksum: sha1:07ab5db503211adba2fa099e66d12ac3c4bbf680 + pristine_git_object: 18217114060ac4e4b45fefabace4628684f27e5c + docs/models/finishreason.md: + id: 73315c2a39b3 + last_write_checksum: sha1:5b58c7fa9219f728b9731287e21abe1be9f11e4a + pristine_git_object: 45a5aedb7241cf080df3eb976a4413064d314009 + docs/models/function.md: + id: 416a80fba031 + last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511 + pristine_git_object: b2bdb3fe82520ea79d0cf1a10ee41c844f90b859 + docs/models/functioncall.md: + id: a78cd1d7f605 + last_write_checksum: sha1:65bf78744b8531cdefb6a288f1af5cbf9d9e2395 + pristine_git_object: 7ccd90dca4868db9b6e178712f95d375210013c8 + docs/models/functionname.md: + id: 4b3bd62c0f26 + last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb + pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 + docs/models/httpvalidationerror.md: + id: a211c095f2ac + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/models/imageurl.md: + id: e75dd23cec1d + last_write_checksum: sha1:30131c77dd240c3bae48d9693698358e5cc0ae63 + pristine_git_object: 7c2bcbc36e99c3cf467d213d6a6a59d6300433d8 + docs/models/imageurlchunk.md: + id: 4407097bfff3 + last_write_checksum: sha1:7a478fd638234ece78770c7fc5e8d0adaf1c3727 + pristine_git_object: f1b926ef8e82443aa1446b1c64c2f02e33d7c789 + docs/models/imageurlchunkimageurl.md: + id: c7fae88454ce + last_write_checksum: sha1:5eff71b7a8be7baacb9ba8ca0be0a0f7a391a325 + pristine_git_object: 767389082d25f06e617fec2ef0134dd9fb2d4064 + docs/models/imageurlchunktype.md: + id: b9af2db9ff60 + last_write_checksum: sha1:990546f94648a09faf9d3ae55d7f6ee66de13e85 + pristine_git_object: 2064a0b405870313bd4b802a3b1988418ce8439e + docs/models/jsonschema.md: + id: a6b15ed6fac8 + last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f + pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269 + docs/models/loc.md: + id: b071d5a509cc + last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 + pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 + docs/models/messages.md: + id: 2103cd675c2f + last_write_checksum: sha1:f6940c9c67b98c49ae2bc2764f6c14178321f244 + pristine_git_object: 1d394500e8ffdd140457575568fc2ce465a1cc3a + docs/models/mistralpromptmode.md: + id: d17d5db4d3b6 + last_write_checksum: sha1:5ccd31d3804f70b6abb0e5a00bda57b9102225e3 + pristine_git_object: 7416e2037c507d19ac02aed914da1208a2fed0a1 + docs/models/ocrimageobject.md: + id: b72f3c5853b2 + last_write_checksum: sha1:90c5158dec6a7b31c858677b6a8efa1e3cabd504 + pristine_git_object: 3c0d5544a80499b011467f29ef83d49f53801af6 + docs/models/ocrpagedimensions.md: + id: b3429f9883f5 + last_write_checksum: sha1:6435aa56e6153b0c90a546818ed780105ae1042a + pristine_git_object: c93ca64d5e20319ec6ec1bcb82b28c6ce0940f29 + docs/models/ocrpageobject.md: + id: 88a9e101b11e + last_write_checksum: sha1:2f23a941e84ed514f364bb57058953254a0028a9 + pristine_git_object: 9db3bb774502de38773fe503fdf36fa87ffc3f64 + docs/models/ocrrequest.md: + id: 6862a3fc2d0f + last_write_checksum: sha1:eab100eeab2e5191d271d3c7fd8994028dc24eb9 + 
pristine_git_object: 0ec824d298b50a7a8f112fb4fceea95ad96111f9 + docs/models/ocrresponse.md: + id: 30042328fb78 + last_write_checksum: sha1:8e4a4ae404ea752f3e9f1108c2a5f89ed6cfb143 + pristine_git_object: 0a309317644eedc643009b6cec3a7dbb142b1a15 + docs/models/ocrusageinfo.md: + id: 419abbb8353a + last_write_checksum: sha1:6e717a3f3de3c464e8b3237f06867cdfecec339e + pristine_git_object: d9d79125cb02bc2b09d8dc543a5e2d4a6c55571c + docs/models/prediction.md: + id: 3c70b2262201 + last_write_checksum: sha1:ca8a77219e6113f2358a5363e935288d90df0725 + pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 + docs/models/referencechunk.md: + id: 07895f9debfd + last_write_checksum: sha1:97d01dd2b907e87b58bebd9c950e1bef29747c89 + pristine_git_object: a132ca2fe6fbbaca644491cbc36d88b0c67cc6bc + docs/models/referencechunktype.md: + id: 0944b80ea9c8 + last_write_checksum: sha1:956b270766c7f11fe99f4a9b484cc29c159e7471 + pristine_git_object: 1e0e2fe64883ef5f3e628777b261b1224661d257 + docs/models/responseformat.md: + id: 50a1e4140614 + last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add + pristine_git_object: 5cab22f2bf1c412699f6a7ed18ef801ecbc3ee4b + docs/models/responseformats.md: + id: cf1f250b82db + last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 + pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f + docs/models/role.md: + id: b694540a5b1e + last_write_checksum: sha1:260a50c56a8bd03cc535edf98ebec06437f87f8d + pristine_git_object: affca78d5574cc42d8e6169f21968e5a8765e053 + docs/models/security.md: + id: 452e4d4eb67a + last_write_checksum: sha1:ce2871b49c1632d50e22d0b1ebe4999021d52313 + pristine_git_object: c698674c513f5b20c04f629e50154e67977275f7 + docs/models/stop.md: + id: f231cc9f5041 + last_write_checksum: sha1:86903cac5f57ad9b8ac07ecba6c454d40a53bdc8 + pristine_git_object: ba40ca83136d6d6cb4f1ef9e5ca3104a704e4846 + docs/models/systemmessage.md: + id: fdb7963e1cdf + last_write_checksum: sha1:97e726dff19a39b468767d5c01fc6256277ee71f + pristine_git_object: 0dba71c00f40c85e74b2c1967e077ffff9660f13 + docs/models/systemmessagecontent.md: + id: 94a56febaeda + last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb + pristine_git_object: 0c87baf3c2fade64a2738a9a4b3ce19647e5dc9a + docs/models/systemmessagecontentchunks.md: + id: cea1c19e9d7a + last_write_checksum: sha1:986aec0f8098158515bbccd0c22e0b3d4151bb32 + pristine_git_object: 40030c170746d9953d25b979ab7e6f522018e230 + docs/models/textchunk.md: + id: 6cd12e0ef110 + last_write_checksum: sha1:6d41d1991d122805734ed0d90ee01592aa5ae6ff + pristine_git_object: 6daab3c381bd8c13d2935bf62578648a8470fc76 + docs/models/thinkchunk.md: + id: bca24d7153f6 + last_write_checksum: sha1:feb95a931bb9cdbfe28ab351618687e513cf830b + pristine_git_object: 66b2e0cde70e25e2927180d2e709503401fddeab + docs/models/thinkchunktype.md: + id: 0fbeed985341 + last_write_checksum: sha1:790f991f95c86c26a6abb9c9c5debda8b53526f5 + pristine_git_object: baf6f755252d027295be082b53ecf80555039414 + docs/models/thinking.md: + id: 07234f8dd364 + last_write_checksum: sha1:a5962d1615b57996730da19e59fbfaa684321442 + pristine_git_object: c7a0d5c9811ea37aaf9e16b6e93c833ab979573f + docs/models/tool.md: + id: 8966139dbeed + last_write_checksum: sha1:1725bf53fc9f1ca3f332322d91de24c9d58adc6a + pristine_git_object: fb661f72887271d5bb470e4edf025a32b00ade17 + docs/models/toolcall.md: + id: 80892ea1a051 + last_write_checksum: sha1:cb27b9d36cfe6227978c7a7a01b1349b6bac99d9 + pristine_git_object: 3819236b9f3eee2f6878818cfbbe2817e97f7de2 + 
docs/models/toolchoice.md: + id: "097076343426" + last_write_checksum: sha1:25b33b34da02c3b46349dc8b6223f9ae18370d16 + pristine_git_object: 373046bbbc834169293b4f4ae8b2e238f952ddde + docs/models/toolchoiceenum.md: + id: 15410de51ffc + last_write_checksum: sha1:ca0cf9bf128bebc8faedd9333cc6a56b30f58130 + pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 + docs/models/toolmessage.md: + id: 0553747c37a1 + last_write_checksum: sha1:3ac87031fdd4ba8b0996e95be8e7ef1a7ff41167 + pristine_git_object: a54f49332c2873471759b477fb4c712fa4fb61f5 + docs/models/toolmessagecontent.md: + id: f0522d2d3c93 + last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee + pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 + docs/models/toolmessagerole.md: + id: f333d4d1ab56 + last_write_checksum: sha1:7e1c004bad24e928da0c286a9f053516b172d24f + pristine_git_object: c24e59c0c79ea886d266e38c673edd51531b9be6 + docs/models/tooltypes.md: + id: adb50fe63ea2 + last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c + pristine_git_object: 84e49253c9b9bd1bd314e2a126106404cbb52f16 + docs/models/type.md: + id: 98c32f09b2c8 + last_write_checksum: sha1:8aa9ca999e9648ddc2240bf80780684e3e858ddf + pristine_git_object: eb0581e7174b6951d69c485a64af5244cb8687fa + docs/models/usageinfo.md: + id: ec6fe65028a9 + last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc + pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 + docs/models/usermessage.md: + id: ed66d7a0f80b + last_write_checksum: sha1:8291f7703e49ed669775dc953ea8cab6715dc7ed + pristine_git_object: 63b0131091cd211b3b1477c1d63b5666a26db546 + docs/models/usermessagecontent.md: + id: 52c072c851e8 + last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a + pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf + docs/models/usermessagerole.md: + id: 99ffa937c462 + last_write_checksum: sha1:52014480516828b43827aa966b7319d9074f1111 + pristine_git_object: 171124e45988e784c56a6b92a0057ba00efc0db4 + docs/models/utils/retryconfig.md: + id: 4343ac43161c + last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d + pristine_git_object: 69dd549ec7f5f885101d08dd502e25748183aebf + docs/models/validationerror.md: + id: 304bdf06ef8b + last_write_checksum: sha1:1889f608099577e6a116c14b211a6811d6b22786 + pristine_git_object: 7a1654a1a5cfb3ab92360b361e8e962bf2db4582 + py.typed: + id: 258c3ed47ae4 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + pylintrc: + id: 7ce8b9f946e6 + last_write_checksum: sha1:6b615d49741eb9ae16375d3a499767783d1128a1 + pristine_git_object: a8fcb932ba2a01c5e96e3b04c59371e930b75558 + pyproject.toml: + id: 5d07e7d72637 + last_write_checksum: sha1:fe91a0866e28c0a5aa96024ebb0a11f0a229d0b9 + pristine_git_object: a1208cb7025a824deb885a79d589d5f4bfff56be + scripts/publish.sh: + id: fe273b08f514 + last_write_checksum: sha1:adc9b741c12ad1591ab4870eabe20f0d0a86cd1a + pristine_git_object: ef28dc10c60d7d6a4bac0c6a1e9caba36b471861 + src/mistralai_azure/__init__.py: + id: 3cd9e92c2f72 + last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b + pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c + src/mistralai_azure/_hooks/__init__.py: + id: 66932eacf398 + last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d + pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 + src/mistralai_azure/_hooks/sdkhooks.py: + id: 1184c9201c62 + last_write_checksum: 
sha1:c98774db1664db2bc6d80e8a5f4f5133260f201a + pristine_git_object: 37ff4e9f0ebd42a58ada6300098a5b1b85a54b69 + src/mistralai_azure/_hooks/types.py: + id: a32fe1943bce + last_write_checksum: sha1:78fc31840a38e668a73871885c779929196a8bec + pristine_git_object: 0c22d7ebccdd64097033454b7c698d10ee59987d + src/mistralai_azure/_version.py: + id: 7711a0bb1da3 + last_write_checksum: sha1:114ed166f6b79aeb7259cb27dcc286f0e10808d8 + pristine_git_object: 71674e19cfa8c66eae4e9301ee37607c0c329e25 + src/mistralai_azure/basesdk.py: + id: 7d825dbc7d6e + last_write_checksum: sha1:4070786599952b3c603d1384d87d7b92bb13b974 + pristine_git_object: 89f7dc493d7f50d5f2d3f468c0a8392a6ec5e28b + src/mistralai_azure/chat.py: + id: ebf1c99bea88 + last_write_checksum: sha1:6a5ceaa44c7a05cf14bf7aa7657a4fc0622ce2a8 + pristine_git_object: 29c7d8f7c15d42f7522ca992343af0e08c98ee7c + src/mistralai_azure/httpclient.py: + id: 808a3f534ffa + last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 + pristine_git_object: 89560b566073785535643e694c112bedbd3db13d + src/mistralai_azure/models/__init__.py: + id: e5fcf3933d2c + last_write_checksum: sha1:73a0624ebe11ecae90f5839968f152e5fccf7c23 + pristine_git_object: 140eec8889cea05c768f7095ed9821fb41cf4212 + src/mistralai_azure/models/assistantmessage.py: + id: 15f117b45380 + last_write_checksum: sha1:3c2872d06ad465dbbbedcf8d397d1f12961e1e2e + pristine_git_object: 7790eb10a034d892c3c1e793c412c75ff8820e40 + src/mistralai_azure/models/chatcompletionchoice.py: + id: 93cfc6cec0d2 + last_write_checksum: sha1:ea89ca0be028f7f8772949a580b33d145abf04c9 + pristine_git_object: 81caf52bba733a5f36f045f10fdee7630ace4738 + src/mistralai_azure/models/chatcompletionrequest.py: + id: d046a16b5e58 + last_write_checksum: sha1:2a7ca28412eb7678979600ce439fcedbc9afcbc4 + pristine_git_object: ecb33b81fd3d77d4530f4e43c1ce93e03b30cee8 + src/mistralai_azure/models/chatcompletionresponse.py: + id: fc342e80f579 + last_write_checksum: sha1:a93593ec734420bc122f0b0b8c49d630795f1d42 + pristine_git_object: 7a66f3221a154b1a0f47c4f808ece8e580280548 + src/mistralai_azure/models/chatcompletionstreamrequest.py: + id: 1052b055a791 + last_write_checksum: sha1:a52652cb4df023ac9f17a7c96ec306684a5de370 + pristine_git_object: d13faa081bd1741dcfad9d4e0dc79a736a1024db + src/mistralai_azure/models/completionchunk.py: + id: e04bc380589d + last_write_checksum: sha1:490c3236276ae8fdecb883744e263aecbe4c608c + pristine_git_object: d6cc2a86a1fda1ebce1f3c5a169ab1118705e3f0 + src/mistralai_azure/models/completionevent.py: + id: e75909f919b1 + last_write_checksum: sha1:9f5423ad56747fb7cc95a6f01e0826510571d4c1 + pristine_git_object: 5a2039c2492bab82184b4f2469806f8b977a7246 + src/mistralai_azure/models/completionresponsestreamchoice.py: + id: 24fe265a60d8 + last_write_checksum: sha1:0b2e738356ab5331e4bd2fd717d2bb5b7b7d7700 + pristine_git_object: 3afecac9f5de1b0acd80b5c421288015d1b70ad5 + src/mistralai_azure/models/contentchunk.py: + id: 9e6b90acdf54 + last_write_checksum: sha1:e93c57ef87654a06d8849030f65db3d279f8f7ad + pristine_git_object: e6a3e24a8857ea1661874197eec967f0ac99e31d + src/mistralai_azure/models/deltamessage.py: + id: 593eaaeda97b + last_write_checksum: sha1:9c2f6e52c81d2f5bf71f520861158dc5eae6eab7 + pristine_git_object: 7fa3c3f216153ebc0a2d31e590793698e95a8be8 + src/mistralai_azure/models/documenturlchunk.py: + id: bff69bfa8014 + last_write_checksum: sha1:5c515c4c85b78d8f4cf147faab9cf01c3501e0b9 + pristine_git_object: ea8d5625a6d1579dd60f2e4a067f455c82334986 + src/mistralai_azure/models/filechunk.py: + id: 0de687fe41c1 
+ last_write_checksum: sha1:56a1765b46702d24ee9c00ab3a06ccdbffdd63f9 + pristine_git_object: 2c3edc078b5e781b4d7163ab01e02a3347c81e2f + src/mistralai_azure/models/function.py: + id: 16111a6101f2 + last_write_checksum: sha1:456d34df457592f1975b0d1e158207d4446a6c41 + pristine_git_object: a4642f92a0cf614b458591c220a83ae1c422ce25 + src/mistralai_azure/models/functioncall.py: + id: e383b31a7f16 + last_write_checksum: sha1:cec288f925fa58842bb7d9e688f6122a01973d4b + pristine_git_object: dd93c4629c3bd81dd6fb305474ce0cd5443e1bdb + src/mistralai_azure/models/functionname.py: + id: ebc3e07e4b6f + last_write_checksum: sha1:743cec4c3f586d67d1ab2816d8d76170f46a3ca1 + pristine_git_object: b55c82af3f29efe38698bc776a8532c647dccc36 + src/mistralai_azure/models/httpvalidationerror.py: + id: da4825943f94 + last_write_checksum: sha1:dce58ead8f7f901514250e1ae5965ba039b1da14 + pristine_git_object: 56607d9437ce39097deac134d4f622ea523cbda7 + src/mistralai_azure/models/imageurl.py: + id: 80cc0df94e9d + last_write_checksum: sha1:a1a416ae5bf9c559219cff5f008a90f251a52477 + pristine_git_object: a5a66360b017cbdc342775241aa4aa2322534c6a + src/mistralai_azure/models/imageurlchunk.py: + id: c5c6dd2f1782 + last_write_checksum: sha1:11634325be12aa567b42227f2117e9b8c854a51c + pristine_git_object: a40e451c60caca688a9379dcb20d545e9e6b76e2 + src/mistralai_azure/models/jsonschema.py: + id: 8c635811dd6b + last_write_checksum: sha1:a99a6de224e51eb6cf85fa6de8cf37266ab5fe6d + pristine_git_object: 0f7563fc17bf172d527d09507294b4ef5646c22c + src/mistralai_azure/models/mistralazureerror.py: + id: a919897c4ea9 + last_write_checksum: sha1:25f4411c7411faad753d46118edf74828b1c9f7c + pristine_git_object: c5bf17528c7cf25bac8f8874f58692c601fcdd76 + src/mistralai_azure/models/mistralpromptmode.py: + id: f62a521bcdae + last_write_checksum: sha1:fcb16c10986bd6946f79b9e330a4be9f26f7e724 + pristine_git_object: 22fb643896688b68af238f6ac75cf41a00b0511b + src/mistralai_azure/models/no_response_error.py: + id: 54523e14f29b + last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f + pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 + src/mistralai_azure/models/ocrimageobject.py: + id: 6c349909fb0c + last_write_checksum: sha1:0fed6abf8172f6ee40e703ef86ee9d902c6e5d7e + pristine_git_object: 9d0dd01dbb5be095e234aa3ec9469fface68c3d2 + src/mistralai_azure/models/ocrpagedimensions.py: + id: f33f598001b2 + last_write_checksum: sha1:5281879ef3d737a17a539cefda9f222302ead7da + pristine_git_object: efb62a58f22ad62c730b3af93bff151586105957 + src/mistralai_azure/models/ocrpageobject.py: + id: 99f20768c4d6 + last_write_checksum: sha1:91fb475aeebba5a12a71f2fdff76305a92824551 + pristine_git_object: 4438e732deaa3160cb39d5c4568b86f63bac9177 + src/mistralai_azure/models/ocrrequest.py: + id: 4e574d5fb9be + last_write_checksum: sha1:856e4640bbd54518481f78f3f334d7a4b22bd9a4 + pristine_git_object: 533d074252af36787ed02dae850fb58054aadf2f + src/mistralai_azure/models/ocrresponse.py: + id: 326a4d9fab25 + last_write_checksum: sha1:cf597498a5841a56bbd1aeb8478bd57a01d93cb1 + pristine_git_object: 3e43fa8eb7b80fafbd9344ad5a98c0ead98c54cb + src/mistralai_azure/models/ocrusageinfo.py: + id: 0de4eae62e4b + last_write_checksum: sha1:85e5a850bd2f847e4a02b0731b0327ca0a02f643 + pristine_git_object: 1f5c9f1bc2cf2d728dec06b0930602852474a29e + src/mistralai_azure/models/prediction.py: + id: 9e8a0a7a3ca7 + last_write_checksum: sha1:e78af600f109a7489a5bcce80b48adf29cc0c4c3 + pristine_git_object: b23a935c00cd7ce4e7b7bd6fe8f2da87f8aaca92 + 
src/mistralai_azure/models/referencechunk.py: + id: 420a12dfec3b + last_write_checksum: sha1:f49da7a4541f55b283e9391e6397a9e4286570bd + pristine_git_object: 32d2ca68e67be3f03e14f74fd7e7692fa05b70f5 + src/mistralai_azure/models/responseformat.py: + id: aa7acbc1bda7 + last_write_checksum: sha1:70e7960bb4ec5db5f133c4cc8f6e813e39f8c671 + pristine_git_object: c989f3a4467c21416ea59b33fbc734a1477a6eb3 + src/mistralai_azure/models/responseformats.py: + id: 780a7aa0e87e + last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b + pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 + src/mistralai_azure/models/responsevalidationerror.py: + id: 1952c765e2ec + last_write_checksum: sha1:d516c0c88210dd28b65747daa2fa1b63f432fe89 + pristine_git_object: a33954ccead3a8df87bdcc30a090efbb0ebecb94 + src/mistralai_azure/models/sdkerror.py: + id: bd8616367442 + last_write_checksum: sha1:41c259fac1bd50d33f1a2fd64d1ed17fd8d0d075 + pristine_git_object: 216d7f8fca986ac29162a1a7cba8c18b7f73d012 + src/mistralai_azure/models/security.py: + id: 7b3bcb55164e + last_write_checksum: sha1:9cacce270a27809ded4ee91aecac4a681154f5f0 + pristine_git_object: c1ae83138b09eab742f755a0f11428cf8c0fd60d + src/mistralai_azure/models/systemmessage.py: + id: 2e15bb043753 + last_write_checksum: sha1:8ec96bfc0533414a698d92387021cac116eadade + pristine_git_object: f99bf4ffb112b068159a3b95bc99ec7ce91b3f7d + src/mistralai_azure/models/systemmessagecontentchunks.py: + id: b6d9a4838359 + last_write_checksum: sha1:1e3f4688317d10f207dd42ef39cf2ac8f6042e54 + pristine_git_object: 4615a16cf39496dffc4982c6f0552d8bf353e280 + src/mistralai_azure/models/textchunk.py: + id: c169e3f0ffc9 + last_write_checksum: sha1:6cb623bafd4005e527dca9b908bb9f4b371342da + pristine_git_object: 5845456e5ca3089bcb551112408a0de84c597a91 + src/mistralai_azure/models/thinkchunk.py: + id: b1b9aeee4dcf + last_write_checksum: sha1:d15b39ef3e12195183664c32854233b9410d565b + pristine_git_object: f53a9f1ad2e6f124a36c9fb9be65bc09dbfbff4b + src/mistralai_azure/models/tool.py: + id: 99c8106f5428 + last_write_checksum: sha1:e50c947a9247a3b17561fbe035a14a1134768dbd + pristine_git_object: ffd9b062e2e025fb87d7ffd4eb187e860d19a51b + src/mistralai_azure/models/toolcall.py: + id: 3643db1054cd + last_write_checksum: sha1:83a920888952c9db3a297eb54aa239c3d5499327 + pristine_git_object: 6ccdcaa2a2e8fcc0e2fd88f4e45ec1ca4953d02a + src/mistralai_azure/models/toolchoice.py: + id: 669768b7cbda + last_write_checksum: sha1:646fcdc7d20ede611256d50c1b8836ba4b5416f6 + pristine_git_object: cc3c2c1f93644b4c6be56b83f84ac70cf77a3cb0 + src/mistralai_azure/models/toolchoiceenum.py: + id: 5f7df8457771 + last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 + pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 + src/mistralai_azure/models/toolmessage.py: + id: 1d9845bf98b3 + last_write_checksum: sha1:52bd15280bcae27ec7ba6a1c64b15648de5b0868 + pristine_git_object: 4bc5c9a9b509fdb89a4cf5ce81231189bf46bab4 + src/mistralai_azure/models/tooltypes.py: + id: 34c499f03e21 + last_write_checksum: sha1:f060bd3aebf7d42c1066c543c47cfa020e61eb27 + pristine_git_object: 638890c589ee642fd0a43e00337505e53ea3ec3a + src/mistralai_azure/models/usageinfo.py: + id: 59a5033672bf + last_write_checksum: sha1:7d0e7a483331077309b78e035cab9d65e87d3f65 + pristine_git_object: bbe5cdfaae260df81e93da11d05a1ba55ecbe329 + src/mistralai_azure/models/usermessage.py: + id: c54119314021 + last_write_checksum: sha1:b45f38755a96b07100baf5149631f366009e701f + pristine_git_object: 
85fedb4bd1bcf64f69e4ead5310cf3fb354a6e3c + src/mistralai_azure/models/validationerror.py: + id: 83cd7bfd6d92 + last_write_checksum: sha1:250ed57498dabd11c0e2b6d255969e0285bb4214 + pristine_git_object: 4caff4a6b74aeb322bf42cd2070b7bd576ca834a + src/mistralai_azure/ocr.py: + id: 77e2e0f594ad + last_write_checksum: sha1:39a4a76ce3de602da92bcddf44eed93f3432ce7c + pristine_git_object: 8702300f64429e821359bb1596dbdabcf012b9c1 + src/mistralai_azure/py.typed: + id: 98df238e554c + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + src/mistralai_azure/sdkconfiguration.py: + id: 476a4f9e2f3e + last_write_checksum: sha1:6b117889b46a546be6e949c1bf843834ceff7417 + pristine_git_object: 51289cf05559ba32dd17e45fab78df4a8697063f + src/mistralai_azure/types/__init__.py: + id: d761bb7a67a5 + last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed + pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c + src/mistralai_azure/types/basemodel.py: + id: 68c97875efb7 + last_write_checksum: sha1:615d0b364fa924b0fef719958df34596cc7c1ae2 + pristine_git_object: 231c2e37283a76082f1a064c7aae47f8ee4ee694 + src/mistralai_azure/utils/__init__.py: + id: 3c68abef839b + last_write_checksum: sha1:81e0385b93362e0f3f6911b65bd4cc601ebc11e1 + pristine_git_object: 56164cf3a86399ee7a8e1a68d19fb494689d77c3 + src/mistralai_azure/utils/annotations.py: + id: 476ee839718f + last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc + pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 + src/mistralai_azure/utils/datetimes.py: + id: e9faf3b28c48 + last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 + pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 + src/mistralai_azure/utils/enums.py: + id: 4d10693bf655 + last_write_checksum: sha1:786ba597f79dca6fbc0d87c591752bb8d775ecb7 + pristine_git_object: c3bc13cfc48794c143a64667f02e7949a8ce3fcc + src/mistralai_azure/utils/eventstreaming.py: + id: 5f5e90529fd7 + last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b + pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc + src/mistralai_azure/utils/forms.py: + id: 91c3fe9ba311 + last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 + pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 + src/mistralai_azure/utils/headers.py: + id: d37ef2f03e41 + last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 + pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a + src/mistralai_azure/utils/logger.py: + id: 9122a46617cc + last_write_checksum: sha1:f3fdb154a3f09b8cc43d74c7e9c02f899f8086e4 + pristine_git_object: b661aff65d38b77d035149699aea09b2785d2fc6 + src/mistralai_azure/utils/metadata.py: + id: 2d93fa8523eb + last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 + pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d + src/mistralai_azure/utils/queryparams.py: + id: dfd31ba97c2b + last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 + pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 + src/mistralai_azure/utils/requestbodies.py: + id: c91db641d5b9 + last_write_checksum: sha1:e0a3a78158eba39880475d62d61be906625676b8 + pristine_git_object: d5240dd5f5efffabbd9aefa2f4a349511a9c75b4 + src/mistralai_azure/utils/retries.py: + id: 6f0cd9f6169d + last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 + pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 + src/mistralai_azure/utils/security.py: 
+ id: "270040388028" + last_write_checksum: sha1:a17130ace2c0db6394f38dd941ad2b700cc755c8 + pristine_git_object: 295a3f40031dbb40073ad227fd4a355660f97ab2 + src/mistralai_azure/utils/serializers.py: + id: 595ddab03803 + last_write_checksum: sha1:a0d184ace7371a14a7d005cca7f358a03e3d4b07 + pristine_git_object: 378a14c0f86a867ca7b0eb7e620da82234c0ccc4 + src/mistralai_azure/utils/unmarshal_json_response.py: + id: bde89a892417 + last_write_checksum: sha1:d2ce9e3478b38e54e4bb3a43610ee0bab00c2e27 + pristine_git_object: f5813119b559442ee85c0b310765db3866bfa09d + src/mistralai_azure/utils/url.py: + id: 080c62716b06 + last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 + pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 + src/mistralai_azure/utils/values.py: + id: 640889083cda + last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 + pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 +examples: + stream_chat: + speakeasy-default-stream-chat: + requestBody: + application/json: {"model": "azureai", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + responses: + "422": + application/json: {} + chat_completion_v1_chat_completions_post: + speakeasy-default-chat-completion-v1-chat-completions-post: + requestBody: + application/json: {"model": "azureai", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} + "422": + application/json: {} + ocr_v1_ocr_post: + speakeasy-default-ocr-v1-ocr-post: + requestBody: + application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}} + responses: + "200": + application/json: {"pages": [], "model": "Golf", "usage_info": {"pages_processed": 944919}} + "422": + application/json: {} + userExample: + requestBody: + application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} + responses: + "200": + application/json: {"pages": [{"index": 1, "markdown": "# LEVERAGING UNLABELED DATA TO PREDICT OUT-OF-DISTRIBUTION PERFORMANCE\nSaurabh Garg*
Carnegie Mellon University\nsgarg2@andrew.cmu.edu\nSivaraman Balakrishnan\nCarnegie Mellon University\nsbalakri@andrew.cmu.edu\nZachary C. Lipton\nCarnegie Mellon University\nzlipton@andrew.cmu.edu\n## Behnam Neyshabur\nGoogle Research, Blueshift team\nneyshabur@google.com\nHanie Sedghi\nGoogle Research, Brain team
hsedghi@google.com\n#### Abstract\nReal-world machine learning deployments are characterized by mismatches between the source (training) and target (test) distributions that may cause performance drops. In this work, we investigate methods for predicting the target domain accuracy using only labeled source data and unlabeled target data. We propose Average Thresholded Confidence (ATC), a practical method that learns a threshold on the model's confidence, predicting accuracy as the fraction of unlabeled examples for which model confidence exceeds that threshold. ATC outperforms previous methods across several model architectures, types of distribution shifts (e.g., due to synthetic corruptions, dataset reproduction, or novel subpopulations), and datasets (WILDS, ImageNet, BREEDS, CIFAR, and MNIST). In our experiments, ATC estimates target performance $2-4 \\times$ more accurately than prior methods. We also explore the theoretical foundations of the problem, proving that, in general, identifying the accuracy is just as hard as identifying the optimal predictor and thus, the efficacy of any method rests upon (perhaps unstated) assumptions on the nature of the shift. Finally, analyzing our method on some toy distributions, we provide insights concerning when it works ${ }^{1}$.\n## 1 INTRODUCTION\nMachine learning models deployed in the real world typically encounter examples from previously unseen distributions. While the IID assumption enables us to evaluate models using held-out data from the source distribution (from which training data is sampled), this estimate is no longer valid in presence of a distribution shift. Moreover, under such shifts, model accuracy tends to degrade (Szegedy et al., 2014; Recht et al., 2019; Koh et al., 2021). Commonly, the only data available to the practitioner are a labeled training set (source) and unlabeled deployment-time data which makes the problem more difficult. In this setting, detecting shifts in the distribution of covariates is known to be possible (but difficult) in theory (Ramdas et al., 2015), and in practice (Rabanser et al., 2018). However, producing an optimal predictor using only labeled source and unlabeled target data is well-known to be impossible absent further assumptions (Ben-David et al., 2010; Lipton et al., 2018).\nTwo vital questions that remain are: (i) the precise conditions under which we can estimate a classifier's target-domain accuracy; and (ii) which methods are most practically useful. To begin, the straightforward way to assess the performance of a model under distribution shift would be to collect labeled (target domain) examples and then to evaluate the model on that data. However, collecting fresh labeled data from the target distribution is prohibitively expensive and time-consuming, especially if the target distribution is non-stationary. Hence, instead of using labeled data, we aim to use unlabeled data from the target distribution, that is comparatively abundant, to predict model performance. Note that in this work, our focus is not to improve performance on the target but, rather, to estimate the accuracy on the target for a given classifier.\n[^0]: Work done in part while Saurabh Garg was interning at Google ${ }^{1}$ Code is available at [https://github.com/saurabhgarg1996/ATC_code](https://github.com/saurabhgarg1996/ATC_code).\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 2, "markdown": "![img-0.jpeg](img-0.jpeg)\nFigure 1: Illustration of our proposed method ATC. 
Left: using source domain validation data, we identify a threshold on a score (e.g. negative entropy) computed on model confidence such that fraction of examples above the threshold matches the validation set accuracy. ATC estimates accuracy on unlabeled target data as the fraction of examples with the score above the threshold. Interestingly, this threshold yields accurate estimates on a wide set of target distributions resulting from natural and synthetic shifts. Right: Efficacy of ATC over previously proposed approaches on our testbed with a post-hoc calibrated model. To obtain errors on the same scale, we rescale all errors with Average Confidence (AC) error. Lower estimation error is better. See Table 1 for exact numbers and comparison on various types of distribution shift. See Sec. 5 for details on our testbed.\nRecently, numerous methods have been proposed for this purpose (Deng & Zheng, 2021; Chen et al., 2021b; Jiang et al., 2021; Deng et al., 2021; Guillory et al., 2021). These methods either require calibration on the target domain to yield consistent estimates (Jiang et al., 2021; Guillory et al., 2021) or additional labeled data from several target domains to learn a linear regression function on a distributional distance that then predicts model performance (Deng et al., 2021; Deng & Zheng, 2021; Guillory et al., 2021). However, methods that require calibration on the target domain typically yield poor estimates since deep models trained and calibrated on source data are not, in general, calibrated on a (previously unseen) target domain (Ovadia et al., 2019). Besides, methods that leverage labeled data from target domains rely on the fact that unseen target domains exhibit strong linear correlation with seen target domains on the underlying distance measure and, hence, can be rendered ineffective when such target domains with labeled data are unavailable (in Sec. 5.1 we demonstrate such a failure on a real-world distribution shift problem). Therefore, throughout the paper, we assume access to labeled source data and only unlabeled data from target domain(s).\nIn this work, we first show that absent assumptions on the source classifier or the nature of the shift, no method of estimating accuracy will work generally (even in non-contrived settings). To estimate accuracy on target domain perfectly, we highlight that even given perfect knowledge of the labeled source distribution (i.e., $p_{s}(x, y)$ ) and unlabeled target distribution (i.e., $p_{t}(x)$ ), we need restrictions on the nature of the shift such that we can uniquely identify the target conditional $p_{t}(y \\mid x)$. Thus, in general, identifying the accuracy of the classifier is as hard as identifying the optimal predictor.\nSecond, motivated by the superiority of methods that use maximum softmax probability (or logit) of a model for Out-Of-Distribution (OOD) detection (Hendrycks & Gimpel, 2016; Hendrycks et al., 2019), we propose a simple method that leverages softmax probability to predict model performance. Our method, Average Thresholded Confidence (ATC), learns a threshold on a score (e.g., maximum confidence or negative entropy) of model confidence on validation source data and predicts target domain accuracy as the fraction of unlabeled target points that receive a score above that threshold. ATC selects a threshold on validation source data such that the fraction of source examples that receive the score above the threshold match the accuracy of those examples. 
Our primary contribution in ATC is the proposal of obtaining the threshold and observing its efficacy on (practical) accuracy estimation. Importantly, our work takes a step forward in positively answering the question raised in Deng & Zheng (2021); Deng et al. (2021) about a practical strategy to select a threshold that enables accuracy prediction with thresholded model confidence.\n", "images": [{"id": "img-0.jpeg", "top_left_x": 292, "top_left_y": 217, "bottom_right_x": 1405, "bottom_right_y": 649, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 3, "markdown": "", "images": [], "dimensions": {"dpi": 539192, "height": 944919, "width": 247256}}, {"index": 27, "markdown": "![img-8.jpeg](img-8.jpeg)\nFigure 9: Scatter plot of predicted accuracy versus (true) OOD accuracy for vision datasets except MNIST with a ResNet50 model. Results reported by aggregating MAE numbers over 4 different seeds.\n", "images": [{"id": "img-8.jpeg", "top_left_x": 290, "top_left_y": 226, "bottom_right_x": 1405, "bottom_right_y": 1834, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 28, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 6.60 | 5.74 | 9.88 | 6.89 | 7.25 | 6.07 | 4.77 | 3.21 | 3.02 | 2.99 | 2.85 | | | | (0.35) | (0.30) | (0.16) | (0.13) | (0.15) | (0.16) | (0.13) | (0.49) | (0.40) | (0.37) | (0.29) | | | Synthetic | 12.33 | 10.20 | 16.50 | 11.91 | 13.87 | 11.08 | 6.55 | 4.65 | 4.25 | 4.21 | 3.87 | | | | (0.51) | (0.48) | (0.26) | (0.17) | (0.18) | (0.17) | (0.35) | (0.55) | (0.55) | (0.55) | (0.75) | | CIFAR100 | Synthetic | 13.69 | 11.51 | 23.61 | 13.10 | 14.60 | 10.14 | 9.85 | 5.50 | 4.75 | 4.72 | 4.94 | | | | (0.55) | (0.41) | (1.16) | (0.80) | (0.77) | (0.64) | (0.57) | (0.70) | (0.73) | (0.74) | (0.74) | | ImageNet200 | Natural | 12.37 | 8.19 | 22.07 | 8.61 | 15.17 | 7.81 | 5.13 | 4.37 | 2.04 | 3.79 | 1.45 | | | | (0.25) | (0.33) | (0.08) | (0.25) | (0.11) | (0.29) | (0.08) | (0.39) | (0.24) | (0.30) | (0.27) | | | Synthetic | 19.86 | 12.94 | 32.44 | 13.35 | 25.02 | 12.38 | 5.41 | 5.93 | 3.09 | 5.00 | 2.68 | | | | (1.38) | (1.81) | (1.00) | (1.30) | (1.10) | (1.38) | (0.89) | (1.38) | (0.87) | (1.28) | (0.45) | | ImageNet | Natural | 7.77 | 6.50 | 18.13 | 6.02 | 8.13 | 5.76 | 6.23 | 3.88 | 2.17 | 2.06 | 0.80 | | | | (0.27) | (0.33) | (0.23) | (0.34) | (0.27) | (0.37) | (0.41) | (0.53) | (0.62) | (0.54) | (0.44) | | | Synthetic | 13.39 | 10.12 | 24.62 | 8.51 | 13.55 | 7.90 | 6.32 | 3.34 | 2.53 | 2.61 | 4.89 | | | | (0.53) | (0.63) | (0.64) | (0.71) | (0.61) | (0.72) | (0.33) | (0.53) | (0.36) | (0.33) | (0.83) | | FMoW-WILDS | Natural | 5.53 | 4.31 | 33.53 | 12.84 | 5.94 | 4.45 | 5.74 | 3.06 | 2.70 | 3.02 | 2.72 | | | | (0.33) | (0.63) | (0.13) | (12.06) | (0.36) | (0.77) | (0.55) | (0.36) | (0.54) | (0.35) | (0.44) | | RxRx1-WILDS | Natural | 5.80 | 5.72 | 7.90 | 4.84 | 5.98 | 5.98 | 6.03 | 4.66 | 4.56 | 4.41 | 4.47 | | | | (0.17) | (0.15) | (0.24) | (0.09) | (0.15) | (0.13) | (0.08) | (0.38) | (0.38) | (0.31) | (0.26) | | Amazon-WILDS | Natural | 2.40 | 2.29 | 8.01 | 2.38 | 2.40 | 2.28 | 17.87 | 1.65 | 1.62 | 1.60 | 1.59 | | | | (0.08) | (0.09) | (0.53) | (0.17) | (0.09) | (0.09) | (0.18) | (0.06) | (0.05) | (0.14) | (0.15) | | CivilCom.-WILDS | 
Natural | 12.64 | 10.80 | 16.76 | 11.03 | 13.31 | 10.99 | 16.65 | | 7.14 | | | | | | (0.52) | (0.48) | (0.53) | (0.49) | (0.52) | (0.49) | (0.25) | | (0.41) | | | | MNIST | Natural | 18.48 | 15.99 | 21.17 | 14.81 | 20.19 | 14.56 | 24.42 | 5.02 | 2.40 | 3.14 | 3.50 | | | | (0.45) | (1.53) | (0.24) | (3.89) | (0.23) | (3.47) | (0.41) | (0.44) | (1.83) | (0.49) | (0.17) | | ENTITY-13 | Same | 16.23 | 11.14 | 24.97 | 10.88 | 19.08 | 10.47 | 10.71 | 5.39 | 3.88 | 4.58 | 4.19 | | | | (0.77) | (0.65) | (0.70) | (0.77) | (0.65) | (0.72) | (0.74) | (0.92) | (0.61) | (0.85) | (0.16) | | | Novel | 28.53 | 22.02 | 38.33 | 21.64 | 32.43 | 21.22 | 20.61 | 13.58 | 10.28 | 12.25 | 6.63 | | | | (0.82) | (0.68) | (0.75) | (0.86) | (0.69) | (0.80) | (0.60) | (1.15) | (1.34) | (1.21) | (0.93) | | ENTITY-30 | Same | 18.59 | 14.46 | 28.82 | 14.30 | 21.63 | 13.46 | 12.92 | 9.12 | 7.75 | 8.15 | 7.64 | | | | (0.51) | (0.52) | (0.43) | (0.71) | (0.37) | (0.59) | (0.14) | (0.62) | (0.72) | (0.68) | (0.88) | | | Novel | 32.34 | 26.85 | 44.02 | 26.27 | 36.82 | 25.42 | 23.16 | 17.75 | 14.30 | 15.60 | 10.57 | | | | (0.60) | (0.58) | (0.56) | (0.79) | (0.47) | (0.68) | (0.12) | (0.76) | (0.85) | (0.86) | (0.86) | | NONLIVING-26 | Same | 18.66 | 17.17 | 26.39 | 16.14 | 19.86 | 15.58 | 16.63 | 10.87 | 10.24 | 10.07 | 10.26 | | | | (0.76) | (0.74) | (0.82) | (0.81) | (0.67) | (0.76) | (0.45) | (0.98) | (0.83) | (0.92) | (1.18) | | | Novel | 33.43 | 31.53 | 41.66 | 29.87 | 35.13 | 29.31 | 29.56 | 21.70 | 20.12 | 19.08 | 18.26 | | | | (0.67) | (0.65) | (0.67) | (0.71) | (0.54) | (0.64) | (0.21) | (0.86) | (0.75) | (0.82) | (1.12) | | LIVING-17 | Same | 12.63 | 11.05 | 18.32 | 10.46 | 14.43 | 10.14 | 9.87 | 4.57 | 3.95 | 3.81 | 4.21 | | | | (1.25) | (1.20) | (1.01) | (1.12) | (1.11) | (1.16) | (0.61) | (0.71) | (0.48) | (0.22) | (0.53) | | | Novel | 29.03 | 26.96 | 35.67 | 26.11 | 31.73 | 25.73 | 23.53 | 16.15 | 14.49 | 12.97 | 11.39 | | | | (1.44) | (1.38) | (1.09) | (1.27) | (1.19) | (1.35) | (0.52) | (1.36) | (1.46) | (1.52) | (1.72) |\nTable 3: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. For language datasets, we use DistilBERT-base-uncased, for vision dataset we report results with DenseNet model with the exception of MNIST where we use FCN. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. 
Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 29, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 7.14 | 6.20 | 10.25 | 7.06 | 7.68 | 6.35 | 5.74 | 4.02 | 3.85 | 3.76 | 3.38 | | | | (0.14) | (0.11) | (0.31) | (0.33) | (0.28) | (0.27) | (0.25) | (0.38) | (0.30) | (0.33) | (0.32) | | | Synthetic | 12.62 | 10.75 | 16.50 | 11.91 | 13.93 | 11.20 | 7.97 | 5.66 | 5.03 | 4.87 | 3.63 | | | | (0.76) | (0.71) | (0.28) | (0.24) | (0.29) | (0.28) | (0.13) | (0.64) | (0.71) | (0.71) | (0.62) | | CIFAR100 | Synthetic | 12.77 | 12.34 | 16.89 | 12.73 | 11.18 | 9.63 | 12.00 | 5.61 | 5.55 | 5.65 | 5.76 | | | | (0.43) | (0.68) | (0.20) | (2.59) | (0.35) | (1.25) | (0.48) | (0.51) | (0.55) | (0.35) | (0.27) | | ImageNet200 | Natural | 12.63 | 7.99 | 23.08 | 7.22 | 15.40 | 6.33 | 5.00 | 4.60 | 1.80 | 4.06 | 1.38 | | | | (0.59) | (0.47) | (0.31) | (0.22) | (0.42) | (0.24) | (0.36) | (0.63) | (0.17) | (0.69) | (0.29) | | | Synthetic | 20.17 | 11.74 | 33.69 | 9.51 | 25.49 | 8.61 | 4.19 | 5.37 | 2.78 | 4.53 | 3.58 | | | | (0.74) | (0.80) | (0.73) | (0.51) | (0.66) | (0.50) | (0.14) | (0.88) | (0.23) | (0.79) | (0.33) | | ImageNet | Natural | 8.09 | 6.42 | 21.66 | 5.91 | 8.53 | 5.21 | 5.90 | 3.93 | 1.89 | 2.45 | 0.73 | | | | (0.25) | (0.28) | (0.38) | (0.22) | (0.26) | (0.25) | (0.44) | (0.26) | (0.21) | (0.16) | (0.10) | | | Synthetic | 13.93 | 9.90 | 28.05 | 7.56 | 13.82 | 6.19 | 6.70 | 3.33 | 2.55 | 2.12 | 5.06 | | | | (0.14) | (0.23) | (0.39) | (0.13) | (0.31) | (0.07) | (0.52) | (0.25) | (0.25) | (0.31) | (0.27) | | FMoW-WILDS | Natural | 5.15 | 3.55 | 34.64 | 5.03 | 5.58 | 3.46 | 5.08 | 2.59 | 2.33 | 2.52 | 2.22 | | | | (0.19) | (0.41) | (0.22) | (0.29) | (0.17) | (0.37) | (0.46) | (0.32) | (0.28) | (0.25) | (0.30) | | RxRx1-WILDS | Natural | 6.17 | 6.11 | 21.05 | 5.21 | 6.54 | 6.27 | 6.82 | 5.30 | 5.20 | 5.19 | 5.63 | | | | (0.20) | (0.24) | (0.31) | (0.18) | (0.21) | (0.20) | (0.31) | (0.30) | (0.44) | (0.43) | (0.55) | | Entity-13 | Same | 18.32 | 14.38 | 27.79 | 13.56 | 20.50 | 13.22 | 16.09 | 9.35 | 7.50 | 7.80 | 6.94 | | | | (0.29) | (0.53) | (1.18) | (0.58) | (0.47) | (0.58) | (0.84) | (0.79) | (0.65) | (0.62) | (0.71) | | | Novel | 28.82 | 24.03 | 38.97 | 22.96 | 31.66 | 22.61 | 25.26 | 17.11 | 13.96 | 14.75 | 9.94 | | | | (0.30) | (0.55) | (1.32) | (0.59) | (0.54) | (0.58) | (1.08) | (0.93) | (0.64) | (0.78) | | | Entity-30 | Same | 16.91 | 14.61 | 26.84 | 14.37 | 18.60 | 13.11 | 13.74 | 8.54 | 7.94 | 7.77 | 8.04 | | | | (1.33) | (1.11) | (2.15) | (1.34) | (1.69) | (1.30) | (1.07) | (1.47) | (1.38) | (1.44) | (1.51) | | | Novel | 28.66 | 25.83 | 39.21 | 25.03 | 30.95 | 23.73 | 23.15 | 15.57 | 13.24 | 12.44 | 11.05 | | | | (1.16) | (0.88) | (2.03) | (1.11) | (1.64) | (1.11) | (0.51) | (1.44) | (1.15) | (1.26) | (1.13) | | NonLIVING-26 | Same | 17.43 | 15.95 | 27.70 | 15.40 | 18.06 | 14.58 | 16.99 | 10.79 | 10.13 | 10.05 | 10.29 | | | | (0.90) | (0.86) | (0.90) | (0.69) | (1.00) | (0.78) | (1.25) | (0.62) | (0.32) | (0.46) | (0.79) | | | Novel | 29.51 | 27.75 | 40.02 | 26.77 | 30.36 | 25.93 | 27.70 | 19.64 | 17.75 | 16.90 | 15.69 | | | | (0.86) | (0.82) | (0.76) | (0.82) | (0.95) | (0.80) | (1.42) | (0.68) | (0.53) | (0.60) | 
(0.83) | | LIVING-17 | Same | 14.28 | 12.21 | 23.46 | 11.16 | 15.22 | 10.78 | 10.49 | 4.92 | 4.23 | 4.19 | 4.73 | | | | (0.96) | (0.93) | (1.16) | (0.90) | (0.96) | (0.99) | (0.97) | (0.57) | (0.42) | (0.35) | (0.24) | | | Novel | 28.91 | 26.35 | 38.62 | 24.91 | 30.32 | 24.52 | 22.49 | 15.42 | 13.02 | 12.29 | 10.34 | | | | (0.66) | (0.73) | (1.01) | (0.61) | (0.59) | (0.74) | (0.85) | (0.59) | (0.53) | (0.73) | (0.62) |\nTable 4: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift for ResNet model. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}], "model": "mistral-ocr-2503-completion", "usage_info": {"pages_processed": 29, "doc_size_bytes": null}} +examplesVersion: 1.0.2 +generatedTests: {} generatedFiles: - .gitattributes - .python-version @@ -185,31 +852,3 @@ generatedFiles: - src/mistralai_azure/utils/serializers.py - src/mistralai_azure/utils/url.py - src/mistralai_azure/utils/values.py -examples: - stream_chat: - speakeasy-default-stream-chat: - requestBody: - application/json: {"model": "azureai", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} - responses: - "422": - application/json: {} - chat_completion_v1_chat_completions_post: - speakeasy-default-chat-completion-v1-chat-completions-post: - requestBody: - application/json: {"model": "azureai", "stream": false, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} - responses: - "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} - "422": - application/json: {} - ocr_v1_ocr_post: - speakeasy-default-ocr-v1-ocr-post: - requestBody: - application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}} - responses: - "200": - application/json: {"pages": [], "model": "Golf", "usage_info": {"pages_processed": 944919}} - "422": - application/json: {} -examplesVersion: 1.0.2 -generatedTests: {} diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index 63e2da75..2eaf820c 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -16,20 +16,30 @@ generation: auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false + hoistGlobalSecurity: true + schemas: + allOfMergeStrategy: shallowMerge + requestBodyFieldName: "" + persistentEdits: {} tests: generateTests: true generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.6.0 + version: 1.7.9 additionalDependencies: dev: pytest: ^8.2.2 pytest-asyncio: ^0.23.7 + allowedRedefinedBuiltins: + - id + - object + asyncMode: both authors: - Mistral baseErrorName: MistralAzureError clientServerStatusCodesAsErrors: true + constFieldCasing: upper defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in Azure. enableCustomCodeRegions: false @@ -47,13 +57,19 @@ python: operations: "" shared: "" webhooks: "" + inferUnionDiscriminators: true inputModelSuffix: input + license: "" maxMethodParams: 15 methodArguments: infer-optional-args moduleName: "" + multipartArrayFormat: legacy outputModelSuffix: output + packageManager: uv packageName: mistralai_azure + preApplyUnionDiscriminators: false pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat + sseFlatResponse: false templateVersion: v2 diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md index b0f05d37..be296b4b 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionrequest.md @@ -3,23 +3,23 @@ ## Fields -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{ "role": "user", "content": "Who is the best French painter? Answer in one short sentence." }
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{ "role": "user", "content": "Who is the best French painter? Answer in one short sentence." }
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md b/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md index 1646528d..dc82a8ef 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md +++ b/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md @@ -1,5 +1,7 @@ # ChatCompletionRequestToolChoice +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
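To make the forcing form described above concrete, here is a minimal sketch against the `mistralai_azure` SDK. Assumptions: a reachable Azure endpoint and credentials are configured; the `get_weather` tool is purely illustrative and not part of this changeset.

```python
# Sketch only: forcing a specific tool via `tool_choice`.
# `get_weather` is a hypothetical tool used for illustration.
from mistralai_azure import MistralAzure

client = MistralAzure(azure_api_key="<key>", azure_endpoint="<endpoint>")

res = client.chat.complete(
    messages=[{"role": "user", "content": "What is the weather in Paris?"}],
    tools=[
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Look up the current weather for a city.",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ],
    # Forces the model to call `get_weather` instead of answering directly.
    tool_choice={"type": "function", "function": {"name": "get_weather"}},
)
print(res.choices[0].message.tool_calls)
```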
+ ## Supported Types diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md index 90397dec..03ad3291 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md @@ -3,23 +3,23 @@ ## Fields -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. 
| | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md index cce0ca3e..43f3ca38 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -1,5 +1,7 @@ # ChatCompletionStreamRequestToolChoice +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
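The streaming variant pairs naturally with JSON mode. A minimal sketch under the same assumptions as above; note the explicit JSON instruction in the system message, which the `response_format` description says JSON mode requires.

```python
# Sketch: streaming a chat completion with JSON mode enabled.
from mistralai_azure import MistralAzure

client = MistralAzure(azure_api_key="<key>", azure_endpoint="<endpoint>")

res = client.chat.stream(
    messages=[
        {"role": "system", "content": "Reply with a JSON object."},
        {"role": "user", "content": "Name three French painters."},
    ],
    response_format={"type": "json_object"},
)

# The stream is a context manager; iterating yields CompletionEvent objects.
with res as event_stream:
    for event in event_stream:
        # `delta.content` is typically a text fragment; it may be None.
        delta = event.data.choices[0].delta.content
        if delta:
            print(delta, end="")
```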
+ ## Supported Types diff --git a/packages/mistralai_azure/docs/models/ocrrequest.md b/packages/mistralai_azure/docs/models/ocrrequest.md index 6a9c77ab..0ec824d2 100644 --- a/packages/mistralai_azure/docs/models/ocrrequest.md +++ b/packages/mistralai_azure/docs/models/ocrrequest.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | -| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | -| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {
"type": "text"
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/prediction.md b/packages/mistralai_azure/docs/models/prediction.md index 86e9c396..fae3c1ca 100644 --- a/packages/mistralai_azure/docs/models/prediction.md +++ b/packages/mistralai_azure/docs/models/prediction.md @@ -1,5 +1,7 @@ # Prediction +Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + ## Fields diff --git a/packages/mistralai_azure/docs/models/responseformat.md b/packages/mistralai_azure/docs/models/responseformat.md index 23a1641b..5cab22f2 100644 --- a/packages/mistralai_azure/docs/models/responseformat.md +++ b/packages/mistralai_azure/docs/models/responseformat.md @@ -1,9 +1,11 @@ # ResponseFormat +Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
| -| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformats.md b/packages/mistralai_azure/docs/models/responseformats.md index 06886afe..2f5f1e55 100644 --- a/packages/mistralai_azure/docs/models/responseformats.md +++ b/packages/mistralai_azure/docs/models/responseformats.md @@ -1,7 +1,5 @@ # ResponseFormats -An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. - ## Values diff --git a/packages/mistralai_azure/docs/models/systemmessagecontent.md b/packages/mistralai_azure/docs/models/systemmessagecontent.md index e0d27d9f..0c87baf3 100644 --- a/packages/mistralai_azure/docs/models/systemmessagecontent.md +++ b/packages/mistralai_azure/docs/models/systemmessagecontent.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.TextChunk]` +### `List[models.SystemMessageContentChunks]` ```python -value: List[models.TextChunk] = /* values here */ +value: List[models.SystemMessageContentChunks] = /* values here */ ``` diff --git a/packages/mistralai_azure/pylintrc b/packages/mistralai_azure/pylintrc index 95f656e2..a8fcb932 100644 --- a/packages/mistralai_azure/pylintrc +++ b/packages/mistralai_azure/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.10 +py-version=3.9 # Discover python modules and packages in the file system subtree. recursive=no @@ -660,4 +660,4 @@ init-import=no # List of qualified module names which can have objects that can redefine # builtins. -redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io \ No newline at end of file diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index 016378d5..a1208cb7 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,10 +1,10 @@ [project] name = "mistralai_azure" -version = "1.6.0" +version = "1.7.9" description = "Python Client SDK for the Mistral AI API in Azure." 
-authors = [{ name = "Mistral" }] -requires-python = ">=3.10" +authors = [{ name = "Mistral" },] readme = "README.md" +requires-python = ">=3.9.2" dependencies = [ "httpcore >=1.0.9", "httpx >=0.28.1", @@ -13,36 +13,25 @@ dependencies = [ [dependency-groups] dev = [ - "mypy==1.15.0", - "pylint==3.2.3", - "pytest>=8.2.2,<9", - "pytest-asyncio>=0.23.7,<0.24", + "mypy ==1.15.0", + "pylint ==3.2.3", + "pyright ==1.1.398", + "pytest (>=8.2.2,<9.0.0)", + "pytest-asyncio (>=0.23.7,<0.24.0)", ] -[tool.setuptools.package-data] -"*" = ["py.typed", "src/mistralai_azure/py.typed"] - -[tool.hatch.build.targets.sdist] -include = ["src/mistralai_azure"] - -[tool.hatch.build.targets.sdist.force-include] -"py.typed" = "py.typed" -"src/mistralai_azure/py.typed" = "src/mistralai_azure/py.typed" - -[tool.hatch.build.targets.wheel] -include = ["src/mistralai_azure"] +[tool.setuptools.packages.find] +where = ["src"] -[tool.hatch.build.targets.wheel.sources] -"src/mistralai_azure" = "mistralai_azure" - -[virtualenvs] -in-project = true +[tool.setuptools.package-data] +"*" = ["py.typed"] [build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" +requires = ["setuptools>=80", "wheel"] +build-backend = "setuptools.build_meta" [tool.pytest.ini_options] +asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] @@ -61,3 +50,5 @@ ignore_missing_imports = true [tool.pyright] venvPath = "." venv = ".venv" + + diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/mistralai_azure/scripts/publish.sh index 0c07c589..ef28dc10 100755 --- a/packages/mistralai_azure/scripts/publish.sh +++ b/packages/mistralai_azure/scripts/publish.sh @@ -1,6 +1,4 @@ #!/usr/bin/env bash -export UV_PUBLISH_TOKEN=${PYPI_TOKEN} - -uv run python ../../scripts/prepare_readme.py --repo-subdir packages/mistralai_azure -- uv build -uv publish +uv build +uv publish --token $PYPI_TOKEN diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/mistralai_azure/src/mistralai_azure/_version.py index 5fd03467..71674e19 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_version.py +++ b/packages/mistralai_azure/src/mistralai_azure/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai_azure" -__version__: str = "1.6.0" +__version__: str = "1.7.9" __openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.634.2" -__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.634.2 1.0.0 mistralai_azure" +__gen_version__: str = "2.789.5" +__user_agent__: str = "speakeasy-sdk/python 1.7.9 2.789.5 1.0.0 mistralai_azure" try: if __package__ is not None: diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/mistralai_azure/src/mistralai_azure/basesdk.py index 84738ce8..89f7dc49 100644 --- a/packages/mistralai_azure/src/mistralai_azure/basesdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/basesdk.py @@ -15,9 +15,19 @@ class BaseSDK: sdk_configuration: SDKConfiguration + parent_ref: Optional[object] = None + """ + Reference to the root SDK instance, if any. This will prevent it from + being garbage collected while there are active streams. 
+ """ - def __init__(self, sdk_config: SDKConfiguration) -> None: + def __init__( + self, + sdk_config: SDKConfiguration, + parent_ref: Optional[object] = None, + ) -> None: self.sdk_configuration = sdk_config + self.parent_ref = parent_ref def _get_url(self, base_url, url_variables): sdk_url, sdk_variables = self.sdk_configuration.get_server_details() @@ -50,6 +60,7 @@ def _build_request_async( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: client = self.sdk_configuration.async_client return self._build_request_with_client( @@ -70,6 +81,7 @@ def _build_request_async( get_serialized_body, url_override, http_headers, + allow_empty_value, ) def _build_request( @@ -92,6 +104,7 @@ def _build_request( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: client = self.sdk_configuration.client return self._build_request_with_client( @@ -112,6 +125,7 @@ def _build_request( get_serialized_body, url_override, http_headers, + allow_empty_value, ) def _build_request_with_client( @@ -135,6 +149,7 @@ def _build_request_with_client( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: query_params = {} @@ -150,6 +165,7 @@ def _build_request_with_client( query_params = utils.get_query_params( request if request_has_query_params else None, _globals if request_has_query_params else None, + allow_empty_value, ) else: # Pick up the query parameter from the override so they can be @@ -244,7 +260,7 @@ def do(): if http_res is None: logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") + raise models.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -265,7 +281,7 @@ def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") + raise models.SDKError("Unexpected error occurred", http_res) return http_res @@ -316,7 +332,7 @@ async def do(): if http_res is None: logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") + raise models.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -337,7 +353,7 @@ async def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") + raise models.SDKError("Unexpected error occurred", http_res) return http_res diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py index 20184014..29c7d8f7 100644 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -5,6 +5,7 @@ from mistralai_azure._hooks import HookContext from mistralai_azure.types import OptionalNullable, UNSET from mistralai_azure.utils import eventstreaming +from mistralai_azure.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, List, Mapping, Optional, Union @@ -47,7 +48,7 @@ def stream( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: + ) -> 
eventstreaming.EventStream[models.CompletionEvent]: r"""Stream chat completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. @@ -60,14 +61,14 @@ def stream( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
:param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method @@ -128,6 +129,7 @@ def stream( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -144,7 +146,7 @@ def stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -159,32 +161,23 @@ def stream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -222,7 +215,7 @@ async def stream_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: r"""Stream chat completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. @@ -235,14 +228,14 @@ async def stream_async( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method @@ -303,6 +296,7 @@ async def stream_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -319,7 +313,7 @@ async def stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -334,32 +328,23 @@ async def stream_async( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) def complete( self, @@ -405,7 +390,7 @@ def complete( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ChatCompletionResponse]: + ) -> models.ChatCompletionResponse: r"""Chat Completion :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. @@ -416,14 +401,14 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method @@ -486,6 +471,7 @@ def complete( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -502,7 +488,7 @@ def complete( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -512,33 +498,20 @@ def complete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] - ) + return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -584,7 +557,7 @@ async def complete_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ChatCompletionResponse]: + ) -> models.ChatCompletionResponse: r"""Chat Completion :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. @@ -595,14 +568,14 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method @@ -665,6 +638,7 @@ async def complete_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -681,7 +655,7 @@ async def complete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -691,30 +665,17 @@ async def complete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] - ) + return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/packages/mistralai_azure/src/mistralai_azure/httpclient.py b/packages/mistralai_azure/src/mistralai_azure/httpclient.py index 47b052cb..89560b56 100644 --- a/packages/mistralai_azure/src/mistralai_azure/httpclient.py +++ b/packages/mistralai_azure/src/mistralai_azure/httpclient.py @@ -107,7 +107,6 @@ def close_clients( # to them from the owning SDK instance and they can be reaped. owner.client = None owner.async_client = None - if sync_client is not None and not sync_client_supplied: try: sync_client.close() diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py index bc1a3f4f..140eec88 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -1,7 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +from .mistralazureerror import MistralAzureError from typing import TYPE_CHECKING from importlib import import_module +import builtins +import sys if TYPE_CHECKING: from .assistantmessage import ( @@ -79,6 +82,7 @@ ) from .jsonschema import JSONSchema, JSONSchemaTypedDict from .mistralpromptmode import MistralPromptMode + from .no_response_error import NoResponseError from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict @@ -93,6 +97,7 @@ ) from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats + from .responsevalidationerror import ResponseValidationError from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .systemmessage import ( @@ -102,7 +107,18 @@ SystemMessageContentTypedDict, SystemMessageTypedDict, ) + from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, + ) from .textchunk import TextChunk, TextChunkTypedDict, Type + from .thinkchunk import ( + ThinkChunk, + ThinkChunkType, + ThinkChunkTypedDict, + Thinking, + ThinkingTypedDict, + ) from .tool import Tool, ToolTypedDict from .toolcall import ToolCall, ToolCallTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict @@ -196,7 +212,9 @@ "LocTypedDict", "Messages", "MessagesTypedDict", + "MistralAzureError", "MistralPromptMode", + "NoResponseError", "OCRImageObject", "OCRImageObjectTypedDict", "OCRPageDimensions", @@ -217,6 +235,7 @@ "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", + "ResponseValidationError", "Role", "SDKError", "Security", @@ -225,10 +244,17 @@ "StopTypedDict", "SystemMessage", "SystemMessageContent", + "SystemMessageContentChunks", + "SystemMessageContentChunksTypedDict", "SystemMessageContentTypedDict", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", + "ThinkChunk", + "ThinkChunkType", + "ThinkChunkTypedDict", + "Thinking", + "ThinkingTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", @@ -319,6 +345,7 @@ "JSONSchema": ".jsonschema", "JSONSchemaTypedDict": ".jsonschema", "MistralPromptMode": ".mistralpromptmode", + "NoResponseError": ".no_response_error", "OCRImageObject": ".ocrimageobject", "OCRImageObjectTypedDict": ".ocrimageobject", "OCRPageDimensions": ".ocrpagedimensions", @@ -341,6 +368,7 @@ "ResponseFormat": ".responseformat", "ResponseFormatTypedDict": ".responseformat", "ResponseFormats": ".responseformats", + "ResponseValidationError": ".responsevalidationerror", "SDKError": ".sdkerror", "Security": ".security", "SecurityTypedDict": ".security", @@ -349,9 +377,16 @@ "SystemMessageContent": ".systemmessage", "SystemMessageContentTypedDict": ".systemmessage", "SystemMessageTypedDict": ".systemmessage", + "SystemMessageContentChunks": ".systemmessagecontentchunks", + "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", "TextChunk": ".textchunk", "TextChunkTypedDict": ".textchunk", "Type": ".textchunk", + "ThinkChunk": ".thinkchunk", + "ThinkChunkType": ".thinkchunk", + "ThinkChunkTypedDict": ".thinkchunk", + "Thinking": ".thinkchunk", + "ThinkingTypedDict": ".thinkchunk", "Tool": ".tool", "ToolTypedDict": ".tool", "ToolCall": ".toolcall", @@ -379,6 +414,18 @@ } +def dynamic_import(modname, retries=3): + for attempt in range(retries): + try: + return import_module(modname, __package__) + except KeyError: + # Clear any half-initialized 
module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + def __getattr__(attr_name: str) -> object: module_name = _dynamic_imports.get(attr_name) if module_name is None: @@ -387,7 +434,7 @@ def __getattr__(attr_name: str) -> object: ) try: - module = import_module(module_name, __package__) + module = dynamic_import(module_name) result = getattr(module, attr_name) return result except ImportError as e: @@ -401,5 +448,5 @@ def __getattr__(attr_name: str) -> object: def __dir__(): - lazy_attrs = list(_dynamic_imports.keys()) - return sorted(lazy_attrs) + lazy_attrs = builtins.list(_dynamic_imports.keys()) + return builtins.sorted(lazy_attrs) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py index 86f5ec09..7790eb10 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py @@ -25,7 +25,7 @@ ) -AssistantMessageRole = Literal["assistant"] +AssistantMessageRole = Literal["assistant",] class AssistantMessageTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py index a78b72d5..81caf52b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py @@ -10,7 +10,14 @@ ChatCompletionChoiceFinishReason = Union[ - Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr + Literal[ + "stop", + "length", + "model_length", + "error", + "tool_calls", + ], + UnrecognizedStr, ] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index 8dffe1bd..ecb33b81 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -63,11 +63,13 @@ "ChatCompletionRequestToolChoiceTypedDict", Union[ToolChoiceTypedDict, ToolChoiceEnum], ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" ChatCompletionRequestToolChoice = TypeAliasType( "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" class ChatCompletionRequestTypedDict(TypedDict): @@ -88,16 +90,21 @@ class ChatCompletionRequestTypedDict(TypedDict): random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. 
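To make the `dynamic_import` retry above concrete: attribute access on the models package routes through the module-level `__getattr__`, which now tolerates a `KeyError` raised by a half-initialized module (for example, during an import race) by evicting it from `sys.modules` and retrying. A sketch:

from mistralai_azure import models

# First access triggers __getattr__ -> dynamic_import(".assistantmessage");
# a transient KeyError during import is retried up to 3 times.
cls = models.AssistantMessage
print(cls.__name__)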
If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" prompt_mode: NotRequired[Nullable[MistralPromptMode]] r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] @@ -130,23 +137,28 @@ class ChatCompletionRequest(BaseModel): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: Optional[ChatCompletionRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index 5fced93e..d13faa08 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -59,11 +59,13 @@ "ChatCompletionStreamRequestToolChoiceTypedDict", Union[ToolChoiceTypedDict, ToolChoiceEnum], ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. 
`any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" ChatCompletionStreamRequestToolChoice = TypeAliasType( "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" class ChatCompletionStreamRequestTypedDict(TypedDict): @@ -83,16 +85,21 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" prompt_mode: NotRequired[Nullable[MistralPromptMode]] r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] @@ -124,23 +131,28 @@ class ChatCompletionStreamRequest(BaseModel): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py index 1a492204..3afecac9 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py @@ -10,7 +10,15 @@ from typing_extensions import Annotated, TypedDict -FinishReason = Union[Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr] +FinishReason = Union[ + Literal[ + "stop", + "length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] class CompletionResponseStreamChoiceTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py index 23ff71a6..ea8d5625 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py @@ -13,7 +13,7 @@ from typing_extensions import NotRequired, TypedDict -DocumentURLChunkType = Literal["document_url"] +DocumentURLChunkType = Literal["document_url",] class DocumentURLChunkTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py index 1d22d97a..56607d94 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py @@ -2,7 +2,9 @@ from __future__ import annotations from .validationerror import ValidationError -from mistralai_azure import utils +from dataclasses import dataclass, field +import httpx +from mistralai_azure.models import MistralAzureError from mistralai_azure.types import BaseModel from typing import List, Optional @@ -11,11 +13,16 @@ class HTTPValidationErrorData(BaseModel): detail: Optional[List[ValidationError]] = None -class HTTPValidationError(Exception): - data: HTTPValidationErrorData +@dataclass(unsafe_hash=True) +class HTTPValidationError(MistralAzureError): + data: HTTPValidationErrorData = field(hash=False) - def __init__(self, data: HTTPValidationErrorData): - self.data = data - - def __str__(self) -> str: - return utils.marshal_json(self.data, HTTPValidationErrorData) + def __init__( + self, + data: HTTPValidationErrorData, + raw_response: httpx.Response, + body: Optional[str] = None, + ): + message = body or raw_response.text + super().__init__(message, raw_response, body) + object.__setattr__(self, "data", data) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py 
b/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py index 734d7f79..a40e451c 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py @@ -15,7 +15,7 @@ ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) -ImageURLChunkType = Literal["image_url"] +ImageURLChunkType = Literal["image_url",] class ImageURLChunkTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py b/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py index bd4584a5..22fb6438 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py @@ -5,4 +5,4 @@ from typing import Literal, Union -MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr] +MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py b/packages/mistralai_azure/src/mistralai_azure/models/prediction.py index 888337d3..b23a935c 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/prediction.py @@ -10,11 +10,15 @@ class PredictionTypedDict(TypedDict): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + type: Literal["content"] content: NotRequired[str] class Prediction(BaseModel): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + TYPE: Annotated[ Annotated[ Optional[Literal["content"]], AfterValidator(validate_const("content")) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py b/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py index 4df3bfbc..32d2ca68 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -ReferenceChunkType = Literal["reference"] +ReferenceChunkType = Literal["reference",] class ReferenceChunkTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py index 6d09de5b..c989f3a4 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py @@ -16,14 +16,16 @@ class ResponseFormatTypedDict(TypedDict): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + type: NotRequired[ResponseFormats] - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
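A sketch of the `Prediction` model documented above: `type` is the constant "content" (enforced by `validate_const`), so only the expected completion text needs to be supplied. The content value here is illustrative:

from mistralai_azure import models

prediction = models.Prediction(content="The capital of France is Paris.")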
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + type: Optional[ResponseFormats] = None - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" json_schema: OptionalNullable[JSONSchema] = UNSET diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py index 08c39951..cbf83ce7 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py @@ -4,5 +4,8 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object", "json_schema"] -r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" +ResponseFormats = Literal[ + "text", + "json_object", + "json_schema", +] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py b/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py index 03216cbf..216d7f8f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py @@ -1,22 +1,40 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from dataclasses import dataclass -from typing import Optional import httpx +from typing import Optional +from dataclasses import dataclass + +from mistralai_azure.models import MistralAzureError + +MAX_MESSAGE_LEN = 10_000 + + +@dataclass(unsafe_hash=True) +class SDKError(MistralAzureError): + """The fallback error class if no more specific error class is matched.""" + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + body_display = body or raw_response.text or '""' + if message: + message += ": " + message += f"Status {raw_response.status_code}" -@dataclass -class SDKError(Exception): - """Represents an error returned by the API.""" + headers = raw_response.headers + content_type = headers.get("content-type", '""') + if content_type != "application/json": + if " " in content_type: + content_type = f'"{content_type}"' + message += f" Content-Type {content_type}" - message: str - status_code: int = -1 - body: str = "" - raw_response: Optional[httpx.Response] = None + if len(body_display) > MAX_MESSAGE_LEN: + truncated = body_display[:MAX_MESSAGE_LEN] + remaining = len(body_display) - MAX_MESSAGE_LEN + body_display = f"{truncated}...and {remaining} more chars" - def __str__(self): - body = "" - if len(self.body) > 0: - body = f"\n{self.body}" + message += f". Body: {body_display}" + message = message.strip() - return f"{self.message}: Status {self.status_code}{body}" + super().__init__(message, raw_response, body) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py index b7d975b6..f99bf4ff 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py @@ -1,23 +1,27 @@ """Code generated by Speakeasy (https://speakeasy.com). 
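A worked example (assumed values) of the message the rewritten `SDKError` above builds, assuming the `MistralAzureError` base renders its message via `str()`:

import httpx
from mistralai_azure.models import SDKError

res = httpx.Response(500, text="upstream timeout")
err = SDKError("API error occurred", res)
# str(err) ->
#   API error occurred: Status 500 Content-Type "text/plain; charset=utf-8". Body: upstream timeout
# Bodies longer than MAX_MESSAGE_LEN (10_000 chars) are truncated with
# "...and N more chars".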
DO NOT EDIT.""" from __future__ import annotations -from .textchunk import TextChunk, TextChunkTypedDict +from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, +) from mistralai_azure.types import BaseModel from typing import List, Literal, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict SystemMessageContentTypedDict = TypeAliasType( - "SystemMessageContentTypedDict", Union[str, List[TextChunkTypedDict]] + "SystemMessageContentTypedDict", + Union[str, List[SystemMessageContentChunksTypedDict]], ) SystemMessageContent = TypeAliasType( - "SystemMessageContent", Union[str, List[TextChunk]] + "SystemMessageContent", Union[str, List[SystemMessageContentChunks]] ) -Role = Literal["system"] +Role = Literal["system",] class SystemMessageTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py index be60c8f9..5845456e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -Type = Literal["text"] +Type = Literal["text",] class TextChunkTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py b/packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py index 8e6a6ad8..01f6f677 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py @@ -4,4 +4,9 @@ from typing import Literal -ToolChoiceEnum = Literal["auto", "none", "any", "required"] +ToolChoiceEnum = Literal[ + "auto", + "none", + "any", + "required", +] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py index abca8abe..4bc5c9a9 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py @@ -22,7 +22,7 @@ ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) -ToolMessageRole = Literal["tool"] +ToolMessageRole = Literal["tool",] class ToolMessageTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py b/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py index dfcd31f0..638890c5 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py @@ -5,4 +5,4 @@ from typing import Literal, Union -ToolTypes = Union[Literal["function"], UnrecognizedStr] +ToolTypes = Union[Literal["function",], UnrecognizedStr] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py index 05976fc0..85fedb4b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py @@ -16,7 +16,7 @@ UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) -UserMessageRole = Literal["user"] +UserMessageRole = Literal["user",] class UserMessageTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/ocr.py b/packages/mistralai_azure/src/mistralai_azure/ocr.py index 
71fe0337..8702300f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/ocr.py +++ b/packages/mistralai_azure/src/mistralai_azure/ocr.py @@ -4,6 +4,7 @@ from mistralai_azure import models, utils from mistralai_azure._hooks import HookContext from mistralai_azure.types import Nullable, OptionalNullable, UNSET +from mistralai_azure.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, List, Mapping, Optional, Union @@ -28,7 +29,7 @@ def process( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.OCRResponse]: + ) -> models.OCRResponse: r"""OCR :param model: @@ -87,6 +88,7 @@ def process( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.OCRRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -103,7 +105,7 @@ def process( config=self.sdk_configuration, base_url=base_url or "", operation_id="ocr_v1_ocr_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -113,31 +115,20 @@ def process( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.OCRResponse]) + return unmarshal_json_response(models.OCRResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def process_async( self, @@ -159,7 +150,7 @@ async def process_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.OCRResponse]: + ) -> models.OCRResponse: r"""OCR :param model: @@ -218,6 +209,7 @@ async def process_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.OCRRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -234,7 +226,7 @@ async def process_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="ocr_v1_ocr_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -244,28 +236,17 @@ async def process_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.OCRResponse]) + return 
unmarshal_json_response(models.OCRResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py index dd4aa4b3..56164cf3 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py @@ -2,6 +2,8 @@ from typing import TYPE_CHECKING from importlib import import_module +import builtins +import sys if TYPE_CHECKING: from .annotations import get_discriminator @@ -158,6 +160,18 @@ } +def dynamic_import(modname, retries=3): + for attempt in range(retries): + try: + return import_module(modname, __package__) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + def __getattr__(attr_name: str) -> object: module_name = _dynamic_imports.get(attr_name) if module_name is None: @@ -166,9 +180,8 @@ def __getattr__(attr_name: str) -> object: ) try: - module = import_module(module_name, __package__) - result = getattr(module, attr_name) - return result + module = dynamic_import(module_name) + return getattr(module, attr_name) except ImportError as e: raise ImportError( f"Failed to import {attr_name} from {module_name}: {e}" @@ -180,5 +193,5 @@ def __getattr__(attr_name: str) -> object: def __dir__(): - lazy_attrs = list(_dynamic_imports.keys()) - return sorted(lazy_attrs) + lazy_attrs = builtins.list(_dynamic_imports.keys()) + return builtins.sorted(lazy_attrs) diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py b/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py index 387874ed..12e0aa4f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py @@ -3,6 +3,7 @@ from enum import Enum from typing import Any, Optional + def get_discriminator(model: Any, fieldname: str, key: str) -> str: """ Recursively search for the discriminator attribute in a model. 
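With `process()`'s return type narrowed from `Optional[models.OCRResponse]` to `models.OCRResponse` in the hunks above, callers no longer need a `None` check; failures surface as the exceptions raised there instead. A sketch, where `sdk` is an already-constructed client and the model id and document are illustrative:

from mistralai_azure import models

try:
    ocr = sdk.ocr.process(
        model="mistral-ocr-latest",
        document={"type": "document_url", "document_url": "https://example.test/doc.pdf"},
    )
    print(len(ocr.pages))      # safe: the result is never None now
except models.HTTPValidationError as e:
    print(e.data.detail)       # structured 422 details
except models.SDKError as e:
    print(e)                   # embeds status, content type and (truncated) body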
@@ -25,31 +26,54 @@ def get_field_discriminator(field: Any) -> Optional[str]: if isinstance(field, dict): if key in field: - return f'{field[key]}' + return f"{field[key]}" if hasattr(field, fieldname): attr = getattr(field, fieldname) if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' + return f"{attr.value}" + return f"{attr}" if hasattr(field, upper_fieldname): attr = getattr(field, upper_fieldname) if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' + return f"{attr.value}" + return f"{attr}" return None + def search_nested_discriminator(obj: Any) -> Optional[str]: + """Recursively search for discriminator in nested structures.""" + # First try direct field lookup + discriminator = get_field_discriminator(obj) + if discriminator is not None: + return discriminator + + # If it's a dict, search in nested values + if isinstance(obj, dict): + for value in obj.values(): + if isinstance(value, list): + # Search in list items + for item in value: + nested_discriminator = search_nested_discriminator(item) + if nested_discriminator is not None: + return nested_discriminator + elif isinstance(value, dict): + # Search in nested dict + nested_discriminator = search_nested_discriminator(value) + if nested_discriminator is not None: + return nested_discriminator + + return None if isinstance(model, list): for field in model: - discriminator = get_field_discriminator(field) + discriminator = search_nested_discriminator(field) if discriminator is not None: return discriminator - discriminator = get_field_discriminator(model) + discriminator = search_nested_discriminator(model) if discriminator is not None: return discriminator - raise ValueError(f'Could not find discriminator field {fieldname} in {model}') + raise ValueError(f"Could not find discriminator field {fieldname} in {model}") diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py b/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py index 74a63f75..0969899b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py @@ -17,6 +17,9 @@ class EventStream(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. + client_ref: Optional[object] response: httpx.Response generator: Generator[T, None, None] @@ -25,9 +28,11 @@ def __init__( response: httpx.Response, decoder: Callable[[str], T], sentinel: Optional[str] = None, + client_ref: Optional[object] = None, ): self.response = response self.generator = stream_events(response, decoder, sentinel) + self.client_ref = client_ref def __iter__(self): return self @@ -43,6 +48,9 @@ def __exit__(self, exc_type, exc_val, exc_tb): class EventStreamAsync(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. 
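A worked example of the nested-discriminator search added in the annotations.py hunk above: the discriminator is now found even when it sits inside a list or dict value rather than on the top-level object:

payload = {"entries": [{"type": "text", "text": "hi"}]}
get_discriminator(payload, "type", "type")  # -> "text" (found inside the list item)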
+ client_ref: Optional[object] response: httpx.Response generator: AsyncGenerator[T, None] @@ -51,9 +59,11 @@ def __init__( response: httpx.Response, decoder: Callable[[str], T], sentinel: Optional[str] = None, + client_ref: Optional[object] = None, ): self.response = response self.generator = stream_events_async(response, decoder, sentinel) + self.client_ref = client_ref def __aiter__(self): return self diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py b/packages/mistralai_azure/src/mistralai_azure/utils/forms.py index e873495f..f961e76b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/forms.py @@ -142,16 +142,21 @@ def serialize_multipart_form( if field_metadata.file: if isinstance(val, List): # Handle array of files + array_field_name = f_name + "[]" for file_obj in val: if not _is_set(file_obj): continue - - file_name, content, content_type = _extract_file_properties(file_obj) + + file_name, content, content_type = _extract_file_properties( + file_obj + ) if content_type is not None: - files.append((f_name + "[]", (file_name, content, content_type))) + files.append( + (array_field_name, (file_name, content, content_type)) + ) else: - files.append((f_name + "[]", (file_name, content))) + files.append((array_field_name, (file_name, content))) else: # Handle single file file_name, content, content_type = _extract_file_properties(val) @@ -161,11 +166,16 @@ def serialize_multipart_form( else: files.append((f_name, (file_name, content))) elif field_metadata.json: - files.append((f_name, ( - None, - marshal_json(val, request_field_types[name]), - "application/json", - ))) + files.append( + ( + f_name, + ( + None, + marshal_json(val, request_field_types[name]), + "application/json", + ), + ) + ) else: if isinstance(val, List): values = [] @@ -175,7 +185,8 @@ def serialize_multipart_form( continue values.append(_val_to_string(value)) - form[f_name + "[]"] = values + array_field_name = f_name + "[]" + form[array_field_name] = values else: form[f_name] = _val_to_string(val) return media_type, form, files diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py b/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py index 37a6e7f9..c04e0db8 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py @@ -27,12 +27,13 @@ def get_query_params( query_params: Any, gbls: Optional[Any] = None, + allow_empty_value: Optional[List[str]] = None, ) -> Dict[str, List[str]]: params: Dict[str, List[str]] = {} - globals_already_populated = _populate_query_params(query_params, gbls, params, []) + globals_already_populated = _populate_query_params(query_params, gbls, params, [], allow_empty_value) if _is_set(gbls): - _populate_query_params(gbls, None, params, globals_already_populated) + _populate_query_params(gbls, None, params, globals_already_populated, allow_empty_value) return params @@ -42,6 +43,7 @@ def _populate_query_params( gbls: Any, query_param_values: Dict[str, List[str]], skip_fields: List[str], + allow_empty_value: Optional[List[str]] = None, ) -> List[str]: globals_already_populated: List[str] = [] @@ -69,6 +71,16 @@ def _populate_query_params( globals_already_populated.append(name) f_name = field.alias if field.alias is not None else name + + allow_empty_set = set(allow_empty_value or []) + should_include_empty = f_name in allow_empty_set and ( + value is None or value == 
[] or value == "" + ) + + if should_include_empty: + query_param_values[f_name] = [""] + continue + serialization = metadata.serialization if serialization is not None: serialized_parms = _get_serialized_params( diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/retries.py b/packages/mistralai_azure/src/mistralai_azure/utils/retries.py index 4d608671..88a91b10 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/retries.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/retries.py @@ -3,7 +3,9 @@ import asyncio import random import time -from typing import List +from datetime import datetime +from email.utils import parsedate_to_datetime +from typing import List, Optional import httpx @@ -51,9 +53,11 @@ def __init__(self, config: RetryConfig, status_codes: List[str]): class TemporaryError(Exception): response: httpx.Response + retry_after: Optional[int] def __init__(self, response: httpx.Response): self.response = response + self.retry_after = _parse_retry_after_header(response) class PermanentError(Exception): @@ -63,6 +67,62 @@ def __init__(self, inner: Exception): self.inner = inner +def _parse_retry_after_header(response: httpx.Response) -> Optional[int]: + """Parse Retry-After header from response. + + Returns: + Retry interval in milliseconds, or None if header is missing or invalid. + """ + retry_after_header = response.headers.get("retry-after") + if not retry_after_header: + return None + + try: + seconds = float(retry_after_header) + return round(seconds * 1000) + except ValueError: + pass + + try: + retry_date = parsedate_to_datetime(retry_after_header) + delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds() + return round(max(0, delta) * 1000) + except (ValueError, TypeError): + pass + + return None + + +def _get_sleep_interval( + exception: Exception, + initial_interval: int, + max_interval: int, + exponent: float, + retries: int, +) -> float: + """Get sleep interval for retry with exponential backoff. + + Args: + exception: The exception that triggered the retry. + initial_interval: Initial retry interval in milliseconds. + max_interval: Maximum retry interval in milliseconds. + exponent: Base for exponential backoff calculation. + retries: Current retry attempt count. + + Returns: + Sleep interval in seconds. 
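A behavior sketch of the `allow_empty_value` check introduced above; `include_empty` mirrors the new condition in `_populate_query_params`, and the field name is illustrative:

from typing import Any, List, Optional

def include_empty(f_name: str, value: Any, allow_empty_value: Optional[List[str]]) -> bool:
    # Mirrors the new check: only opted-in fields with an "empty" value
    # are emitted as an empty query parameter instead of being dropped.
    allow_empty_set = set(allow_empty_value or [])
    return f_name in allow_empty_set and (value is None or value == [] or value == "")

assert include_empty("page_token", None, ["page_token"])       # emitted as ""
assert not include_empty("page_token", "abc", ["page_token"])  # serialized normally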
+ """ + if ( + isinstance(exception, TemporaryError) + and exception.retry_after is not None + and exception.retry_after > 0 + ): + return exception.retry_after / 1000 + + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + return min(sleep, max_interval / 1000) + + def retry(func, retries: Retries): if retries.config.strategy == "backoff": @@ -183,8 +243,10 @@ def retry_with_backoff( return exception.response raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) time.sleep(sleep) retries += 1 @@ -211,7 +273,9 @@ async def retry_with_backoff_async( return exception.response raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) await asyncio.sleep(sleep) retries += 1 diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py index 76e44d71..378a14c0 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py @@ -192,7 +192,9 @@ def is_union(obj: object) -> bool: """ Returns True if the given object is a typing.Union or typing_extensions.Union. """ - return any(obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union")) + return any( + obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union") + ) def stream_to_text(stream: httpx.Response) -> str: @@ -245,4 +247,3 @@ def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: f"Neither typing nor typing_extensions has an object called {name!r}" ) return result - diff --git a/packages/mistralai_azure/uv.lock b/packages/mistralai_azure/uv.lock index d77ea936..b28ae820 100644 --- a/packages/mistralai_azure/uv.lock +++ b/packages/mistralai_azure/uv.lock @@ -1,6 +1,6 @@ version = 1 revision = 3 -requires-python = ">=3.10" +requires-python = ">=3.9.2" resolution-markers = [ "python_full_version >= '3.12'", "python_full_version == '3.11.*'", @@ -154,7 +154,7 @@ wheels = [ [[package]] name = "mistralai-azure" -version = "1.6.0" +version = "1.7.9" source = { editable = "." 
}
 dependencies = [
     { name = "httpcore" },
@@ -166,6 +166,7 @@ dependencies = [
 dev = [
     { name = "mypy" },
     { name = "pylint" },
+    { name = "pyright" },
     { name = "pytest" },
     { name = "pytest-asyncio" },
 ]
@@ -181,8 +182,9 @@ requires-dist = [
 dev = [
     { name = "mypy", specifier = "==1.15.0" },
     { name = "pylint", specifier = "==3.2.3" },
-    { name = "pytest", specifier = ">=8.2.2,<9" },
-    { name = "pytest-asyncio", specifier = ">=0.23.7,<0.24" },
+    { name = "pyright", specifier = "==1.1.398" },
+    { name = "pytest", specifier = ">=8.2.2,<9.0.0" },
+    { name = "pytest-asyncio", specifier = ">=0.23.7,<0.24.0" },
 ]
 
 [[package]]
@@ -238,6 +240,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", size = 4695, upload-time = "2023-02-04T12:11:25.002Z" },
 ]
 
+[[package]]
+name = "nodeenv"
+version = "1.10.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" },
+]
+
 [[package]]
 name = "packaging"
 version = "24.1"
@@ -402,12 +413,26 @@ dependencies = [
     { name = "platformdirs" },
     { name = "tomli", marker = "python_full_version < '3.11'" },
     { name = "tomlkit" },
+    { name = "typing-extensions", marker = "python_full_version < '3.10'" },
 ]
 sdist = { url = "https://files.pythonhosted.org/packages/9a/e9/60280b14cc1012794120345ce378504cf17409e38cd88f455dc24e0ad6b5/pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60", size = 1506739, upload-time = "2024-06-06T14:19:17.955Z" }
 wheels = [
     { url = "https://files.pythonhosted.org/packages/50/d3/d346f779cbc9384d8b805a7557b5f2b8ee9f842bffebec9fc6364d6ae183/pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8", size = 519244, upload-time = "2024-06-06T14:19:13.228Z" },
 ]
 
+[[package]]
+name = "pyright"
+version = "1.1.398"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "nodeenv" },
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/24/d6/48740f1d029e9fc4194880d1ad03dcf0ba3a8f802e0e166b8f63350b3584/pyright-1.1.398.tar.gz", hash = "sha256:357a13edd9be8082dc73be51190913e475fa41a6efb6ec0d4b7aab3bc11638d8", size = 3892675, upload-time = "2025-03-26T10:06:06.063Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/58/e0/5283593f61b3c525d6d7e94cfb6b3ded20b3df66e953acaf7bb4f23b3f6e/pyright-1.1.398-py3-none-any.whl", hash = "sha256:0a70bfd007d9ea7de1cf9740e1ad1a40a122592cfe22a3f6791b06162ad08753", size = 5780235, upload-time = "2025-03-26T10:06:03.994Z" },
+]
+
 [[package]]
 name = "pytest"
 version = "8.3.2"
diff --git a/packages/mistralai_gcp/.gitignore b/packages/mistralai_gcp/.gitignore
index 5a82b069..60154874 100644
--- a/packages/mistralai_gcp/.gitignore
+++ b/packages/mistralai_gcp/.gitignore
@@ -1,3 +1,8 @@
+**/__pycache__/
+**/.speakeasy/temp/
+**/.speakeasy/logs/
+.env
+.env.local
 .speakeasy/reports
 README-PYPI.md
 .venv/
diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock
index 5e157235..86df57c3 100644
--- a/packages/mistralai_gcp/.speakeasy/gen.lock
+++ b/packages/mistralai_gcp/.speakeasy/gen.lock
@@ -1,38 +1,669 @@
 lockVersion: 2.0.0
 id: ec60f2d8-7869-45c1-918e-773d41a8cf74
 management:
-  docChecksum: 28fe1ab59b4dee005217f2dbbd836060
-  docVersion: 0.0.2
-  speakeasyVersion: 1.517.3
-  generationVersion: 2.548.6
-  releaseVersion: 1.6.0
-  configChecksum: 66bf5911f59189922e03a75a72923b32
+  docChecksum: 05fc6f45406deac180ffc1df760c67f4
+  docVersion: 1.0.0
+  speakeasyVersion: 1.681.0
+  generationVersion: 2.789.5
+  releaseVersion: 2.0.1
+  configChecksum: 9f60a8d08c4c55a7516f60867aaaf52c
 published: true
+persistentEdits:
+  generation_id: b156a09b-7c17-408c-b04e-9e245415034f
+  pristine_commit_hash: e1eba400d7842b07c22f18620eb08f293bdda256
+  pristine_tree_hash: ab6d8cbce20a025aeb36a7bdece62c58cf5c4579
 features:
   python:
     additionalDependencies: 1.0.0
+    additionalProperties: 1.0.1
     constsAndDefaults: 1.0.5
-    core: 5.12.3
+    core: 5.23.15
     defaultEnabledRetries: 0.2.0
     enumUnions: 0.1.0
     envVarSecurityUsage: 0.3.2
-    examples: 3.0.1
+    examples: 3.0.2
     flatRequests: 1.0.1
-    globalSecurity: 3.0.3
+    globalSecurity: 3.0.4
     globalSecurityCallbacks: 1.0.0
     globalSecurityFlattening: 1.0.0
-    globalServerURLs: 3.1.0
+    globalServerURLs: 3.2.0
     methodArguments: 1.0.2
     nameOverrides: 3.0.1
     nullables: 1.0.1
-    openEnums: 1.0.0
+    openEnums: 1.0.1
     responseFormat: 1.0.1
-    retries: 3.0.2
-    sdkHooks: 1.0.1
-    serverEvents: 1.0.7
+    retries: 3.0.3
+    sdkHooks: 1.2.0
+    serverEvents: 1.0.11
     serverEventsSentinels: 0.1.0
     serverIDs: 3.0.0
-    unions: 3.0.4
+    unions: 3.1.1
+trackedFiles:
+  .gitattributes:
+    id: 24139dae6567
+    last_write_checksum: sha1:53134de3ada576f37c22276901e1b5b6d85cd2da
+    pristine_git_object: 4d75d59008e4d8609876d263419a9dc56c8d6f3a
+  .vscode/settings.json:
+    id: 89aa447020cd
+    last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526
+    pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae
+  docs/models/arguments.md:
+    id: 7ea5e33709a7
+    last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec
+    pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79
+  docs/models/assistantmessage.md:
+    id: 7e0218023943
+    last_write_checksum: sha1:e75d407349842b2de46ee3ca6250f9f51121cf38
+    pristine_git_object: 3d0bd90b4433c1a919f917f4bcf2518927cdcd50
+  docs/models/assistantmessagecontent.md:
+    id: 9f1795bbe642
+    last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643
+    pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d
+  docs/models/assistantmessagerole.md:
+    id: bb5d2a4bc72f
+    last_write_checksum: sha1:82f2c4f469426bd476c1003a91394afb89cb7c91
+    pristine_git_object: 658229e77eb6419391cf7941568164541c528387
+  docs/models/chatcompletionchoice.md:
+    id: 0d15c59ab501
+    last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564
+    pristine_git_object: deaa0ea073e1b6c21bd466c10db31db2464066f1
+  docs/models/chatcompletionchoicefinishreason.md:
+    id: 225764da91d3
+    last_write_checksum: sha1:b894d3408cb801e072c3c302a5676ff939d59284
+    pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b
+  docs/models/chatcompletionrequest.md:
+    id: adffe90369d0
+    last_write_checksum: sha1:a03f197e7952c3c2dae6dc1d178ba71b754c8c9b
+    pristine_git_object: 48103e3075554b3d95744d328d42e27a5ce0668d
+  docs/models/chatcompletionrequestmessages.md: +
id: ec996b350e12 + last_write_checksum: sha1:2ecec8d12cdb48426f4eb62732066fc79fcd4ec3 + pristine_git_object: bc7708a67f06d74e8a5bf1facb2b23fb1e08053c + docs/models/chatcompletionrequeststop.md: + id: fcaf5bbea451 + last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851 + pristine_git_object: 749296d420c0671d2a1d6d22483b51f577a86485 + docs/models/chatcompletionrequesttoolchoice.md: + id: b97041b2f15b + last_write_checksum: sha1:7ad7eb133f70e07d0d6a9def36aadd08b35cf861 + pristine_git_object: dc82a8ef91e7bfd44f1d2d9d9a4ef61b6e76cc34 + docs/models/chatcompletionresponse.md: + id: 7c53b24681b9 + last_write_checksum: sha1:a56581c0846638cfe6df26d3045fb4f874ccd931 + pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b + docs/models/chatcompletionstreamrequest.md: + id: cf8f29558a68 + last_write_checksum: sha1:abc7218235cbc12c51a0e2f9e60285f39264574e + pristine_git_object: aaacc09c97c02e314e9d48b365132f1e2b956f81 + docs/models/chatcompletionstreamrequesttoolchoice.md: + id: 210d5e5b1413 + last_write_checksum: sha1:0543164caf3f4fb2bef3061dbd1a5e6b34b17ae9 + pristine_git_object: 43f3ca3809bf1a2a040e2ad7c19a2b22db0b73f8 + docs/models/completionchunk.md: + id: 60cb30423c60 + last_write_checksum: sha1:61b976fe2e71236cf7941ee1635decc31bd304b2 + pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35 + docs/models/completionevent.md: + id: e57cd17cb9dc + last_write_checksum: sha1:4f59c67af0b11c77b80d2b9c7aca36484d2be219 + pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d + docs/models/completionresponsestreamchoice.md: + id: d56824d615a6 + last_write_checksum: sha1:dcf4b125b533192cb5aea1a68551866954712dc5 + pristine_git_object: c807dacd98eb3561ee45f40db71a92cb72b0f6de + docs/models/content.md: + id: bfd859c99f86 + last_write_checksum: sha1:6673dbd19871a701955a322348a4f7e51c38ffc8 + pristine_git_object: a833dc2c6043e36b85131c9243b4cc02b9fcc4c6 + docs/models/contentchunk.md: + id: d2d3a32080cd + last_write_checksum: sha1:5839a26cdc412b78caad7fb59df97bdcea57be6d + pristine_git_object: 22023e8b19692df969693b7a14f8cf6e0143859f + docs/models/deltamessage.md: + id: 6c5ed6b60968 + last_write_checksum: sha1:c213149256c620715d744c89685d5b6cbdea6f58 + pristine_git_object: 61deabbf7e37388fdd4c1789089d120cc0b937b9 + docs/models/fimcompletionrequest.md: + id: b44677ecc293 + last_write_checksum: sha1:ecd3e647a9f48c7252ef78ceee352e2ae7a59738 + pristine_git_object: 380f109c03d513ff6d32aac9471deb80c91d0ada + docs/models/fimcompletionrequeststop.md: + id: ea5475297a83 + last_write_checksum: sha1:a6cdb4bda01ac58016a71f35da48a5d10df11623 + pristine_git_object: a0dbb00a82a03acc8b62b81d7597722a6ca46118 + docs/models/fimcompletionresponse.md: + id: 050d62ba2fac + last_write_checksum: sha1:a6101a69e83b7a5bcf96ec77ba1cab8748f734f4 + pristine_git_object: cd62d0349503fd8b13582d0ba47ab9cff40f6b28 + docs/models/fimcompletionstreamrequest.md: + id: c881d7e27637 + last_write_checksum: sha1:8a01721942e6bda7d243c3c5dad20d39e48a32b0 + pristine_git_object: a890ff2b034dbcd09a7ea56e4de6c826699f84df + docs/models/fimcompletionstreamrequeststop.md: + id: c97a11b764e9 + last_write_checksum: sha1:958d5087050fdeb128745884ebcf565b4fdc3886 + pristine_git_object: 5a9e2ff020d4939f7fd42c0673ea7bdd16cca99d + docs/models/finishreason.md: + id: 73315c2a39b3 + last_write_checksum: sha1:5b58c7fa9219f728b9731287e21abe1be9f11e4a + pristine_git_object: 45a5aedb7241cf080df3eb976a4413064d314009 + docs/models/function.md: + id: 416a80fba031 + last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511 + 
pristine_git_object: b2bdb3fe82520ea79d0cf1a10ee41c844f90b859 + docs/models/functioncall.md: + id: a78cd1d7f605 + last_write_checksum: sha1:65bf78744b8531cdefb6a288f1af5cbf9d9e2395 + pristine_git_object: 7ccd90dca4868db9b6e178712f95d375210013c8 + docs/models/functionname.md: + id: 4b3bd62c0f26 + last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb + pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 + docs/models/httpvalidationerror.md: + id: a211c095f2ac + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/models/imageurl.md: + id: e75dd23cec1d + last_write_checksum: sha1:30131c77dd240c3bae48d9693698358e5cc0ae63 + pristine_git_object: 7c2bcbc36e99c3cf467d213d6a6a59d6300433d8 + docs/models/imageurlchunk.md: + id: 4407097bfff3 + last_write_checksum: sha1:7a478fd638234ece78770c7fc5e8d0adaf1c3727 + pristine_git_object: f1b926ef8e82443aa1446b1c64c2f02e33d7c789 + docs/models/imageurlchunkimageurl.md: + id: c7fae88454ce + last_write_checksum: sha1:5eff71b7a8be7baacb9ba8ca0be0a0f7a391a325 + pristine_git_object: 767389082d25f06e617fec2ef0134dd9fb2d4064 + docs/models/imageurlchunktype.md: + id: b9af2db9ff60 + last_write_checksum: sha1:990546f94648a09faf9d3ae55d7f6ee66de13e85 + pristine_git_object: 2064a0b405870313bd4b802a3b1988418ce8439e + docs/models/jsonschema.md: + id: a6b15ed6fac8 + last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f + pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269 + docs/models/loc.md: + id: b071d5a509cc + last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 + pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 + docs/models/messages.md: + id: 2103cd675c2f + last_write_checksum: sha1:f6940c9c67b98c49ae2bc2764f6c14178321f244 + pristine_git_object: 1d394500e8ffdd140457575568fc2ce465a1cc3a + docs/models/mistralpromptmode.md: + id: d17d5db4d3b6 + last_write_checksum: sha1:5ccd31d3804f70b6abb0e5a00bda57b9102225e3 + pristine_git_object: 7416e2037c507d19ac02aed914da1208a2fed0a1 + docs/models/prediction.md: + id: 3c70b2262201 + last_write_checksum: sha1:ca8a77219e6113f2358a5363e935288d90df0725 + pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 + docs/models/referencechunk.md: + id: 07895f9debfd + last_write_checksum: sha1:97d01dd2b907e87b58bebd9c950e1bef29747c89 + pristine_git_object: a132ca2fe6fbbaca644491cbc36d88b0c67cc6bc + docs/models/referencechunktype.md: + id: 0944b80ea9c8 + last_write_checksum: sha1:956b270766c7f11fe99f4a9b484cc29c159e7471 + pristine_git_object: 1e0e2fe64883ef5f3e628777b261b1224661d257 + docs/models/responseformat.md: + id: 50a1e4140614 + last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add + pristine_git_object: 5cab22f2bf1c412699f6a7ed18ef801ecbc3ee4b + docs/models/responseformats.md: + id: cf1f250b82db + last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 + pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f + docs/models/role.md: + id: b694540a5b1e + last_write_checksum: sha1:260a50c56a8bd03cc535edf98ebec06437f87f8d + pristine_git_object: affca78d5574cc42d8e6169f21968e5a8765e053 + docs/models/security.md: + id: 452e4d4eb67a + last_write_checksum: sha1:ce2871b49c1632d50e22d0b1ebe4999021d52313 + pristine_git_object: c698674c513f5b20c04f629e50154e67977275f7 + docs/models/stop.md: + id: f231cc9f5041 + last_write_checksum: sha1:86903cac5f57ad9b8ac07ecba6c454d40a53bdc8 + pristine_git_object: ba40ca83136d6d6cb4f1ef9e5ca3104a704e4846 + 
docs/models/systemmessage.md: + id: fdb7963e1cdf + last_write_checksum: sha1:97e726dff19a39b468767d5c01fc6256277ee71f + pristine_git_object: 0dba71c00f40c85e74b2c1967e077ffff9660f13 + docs/models/systemmessagecontent.md: + id: 94a56febaeda + last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb + pristine_git_object: 0c87baf3c2fade64a2738a9a4b3ce19647e5dc9a + docs/models/systemmessagecontentchunks.md: + id: cea1c19e9d7a + last_write_checksum: sha1:986aec0f8098158515bbccd0c22e0b3d4151bb32 + pristine_git_object: 40030c170746d9953d25b979ab7e6f522018e230 + docs/models/textchunk.md: + id: 6cd12e0ef110 + last_write_checksum: sha1:6d41d1991d122805734ed0d90ee01592aa5ae6ff + pristine_git_object: 6daab3c381bd8c13d2935bf62578648a8470fc76 + docs/models/thinkchunk.md: + id: bca24d7153f6 + last_write_checksum: sha1:feb95a931bb9cdbfe28ab351618687e513cf830b + pristine_git_object: 66b2e0cde70e25e2927180d2e709503401fddeab + docs/models/thinkchunktype.md: + id: 0fbeed985341 + last_write_checksum: sha1:790f991f95c86c26a6abb9c9c5debda8b53526f5 + pristine_git_object: baf6f755252d027295be082b53ecf80555039414 + docs/models/thinking.md: + id: 07234f8dd364 + last_write_checksum: sha1:a5962d1615b57996730da19e59fbfaa684321442 + pristine_git_object: c7a0d5c9811ea37aaf9e16b6e93c833ab979573f + docs/models/tool.md: + id: 8966139dbeed + last_write_checksum: sha1:1725bf53fc9f1ca3f332322d91de24c9d58adc6a + pristine_git_object: fb661f72887271d5bb470e4edf025a32b00ade17 + docs/models/toolcall.md: + id: 80892ea1a051 + last_write_checksum: sha1:cb27b9d36cfe6227978c7a7a01b1349b6bac99d9 + pristine_git_object: 3819236b9f3eee2f6878818cfbbe2817e97f7de2 + docs/models/toolchoice.md: + id: "097076343426" + last_write_checksum: sha1:25b33b34da02c3b46349dc8b6223f9ae18370d16 + pristine_git_object: 373046bbbc834169293b4f4ae8b2e238f952ddde + docs/models/toolchoiceenum.md: + id: 15410de51ffc + last_write_checksum: sha1:ca0cf9bf128bebc8faedd9333cc6a56b30f58130 + pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 + docs/models/toolmessage.md: + id: 0553747c37a1 + last_write_checksum: sha1:3ac87031fdd4ba8b0996e95be8e7ef1a7ff41167 + pristine_git_object: a54f49332c2873471759b477fb4c712fa4fb61f5 + docs/models/toolmessagecontent.md: + id: f0522d2d3c93 + last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee + pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 + docs/models/toolmessagerole.md: + id: f333d4d1ab56 + last_write_checksum: sha1:7e1c004bad24e928da0c286a9f053516b172d24f + pristine_git_object: c24e59c0c79ea886d266e38c673edd51531b9be6 + docs/models/tooltypes.md: + id: adb50fe63ea2 + last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c + pristine_git_object: 84e49253c9b9bd1bd314e2a126106404cbb52f16 + docs/models/type.md: + id: 98c32f09b2c8 + last_write_checksum: sha1:8aa9ca999e9648ddc2240bf80780684e3e858ddf + pristine_git_object: eb0581e7174b6951d69c485a64af5244cb8687fa + docs/models/usageinfo.md: + id: ec6fe65028a9 + last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc + pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 + docs/models/usermessage.md: + id: ed66d7a0f80b + last_write_checksum: sha1:8291f7703e49ed669775dc953ea8cab6715dc7ed + pristine_git_object: 63b0131091cd211b3b1477c1d63b5666a26db546 + docs/models/usermessagecontent.md: + id: 52c072c851e8 + last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a + pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf + docs/models/usermessagerole.md: + id: 99ffa937c462 + 
last_write_checksum: sha1:52014480516828b43827aa966b7319d9074f1111 + pristine_git_object: 171124e45988e784c56a6b92a0057ba00efc0db4 + docs/models/utils/retryconfig.md: + id: 4343ac43161c + last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d + pristine_git_object: 69dd549ec7f5f885101d08dd502e25748183aebf + docs/models/validationerror.md: + id: 304bdf06ef8b + last_write_checksum: sha1:1889f608099577e6a116c14b211a6811d6b22786 + pristine_git_object: 7a1654a1a5cfb3ab92360b361e8e962bf2db4582 + py.typed: + id: 258c3ed47ae4 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + pylintrc: + id: 7ce8b9f946e6 + last_write_checksum: sha1:6b615d49741eb9ae16375d3a499767783d1128a1 + pristine_git_object: a8fcb932ba2a01c5e96e3b04c59371e930b75558 + pyproject.toml: + id: 5d07e7d72637 + last_write_checksum: sha1:0885322cdc05d4e6e2973b5b69c52e4b45ae8695 + pristine_git_object: 65318b2bd8e129241dffbb5080d6f2c8f67c698f + scripts/publish.sh: + id: fe273b08f514 + last_write_checksum: sha1:adc9b741c12ad1591ab4870eabe20f0d0a86cd1a + pristine_git_object: ef28dc10c60d7d6a4bac0c6a1e9caba36b471861 + src/mistralai_gcp/__init__.py: + id: b6565f49e73b + last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b + pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c + src/mistralai_gcp/_hooks/__init__.py: + id: 663f3129700b + last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d + pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 + src/mistralai_gcp/_hooks/sdkhooks.py: + id: 46ab7e644296 + last_write_checksum: sha1:a2c018871bea47706a76b03d9a17dab717c339c0 + pristine_git_object: b81c2a2739d316cfed54daec48df6375155eb802 + src/mistralai_gcp/_hooks/types.py: + id: 1f77198243ec + last_write_checksum: sha1:fbf5f1fb258b75133c6b12ae688c03c18b2debd5 + pristine_git_object: f8088f4c26d3ae27202c716c80c391d4daed4156 + src/mistralai_gcp/_version.py: + id: 4e2b8c406f49 + last_write_checksum: sha1:ab865bdd35ca19dbeb7d5d1377430b3d72c33718 + pristine_git_object: 17d5ca642c81b9a6e5e6ef97d84539b7fe506908 + src/mistralai_gcp/basesdk.py: + id: b84fa6111b69 + last_write_checksum: sha1:41381dd799bd5e1f8a15bb65a0672dc6cc4796c4 + pristine_git_object: 7a93de23ad83096b2392e05b11f29030b5961456 + src/mistralai_gcp/chat.py: + id: 1cc7d54332ba + last_write_checksum: sha1:baa159843f45b9f8f0670d607dd4ca772ce9b0d2 + pristine_git_object: 3f73b1ecf8b136c1b8fe82236ab7aa873b6d1ada + src/mistralai_gcp/fim.py: + id: 1e5bec08157c + last_write_checksum: sha1:609125292f5828193ea7d2cddd562c556e682d5a + pristine_git_object: 0fb6727405017034e9a1950177312327156b7625 + src/mistralai_gcp/httpclient.py: + id: 7de4ac861042 + last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 + pristine_git_object: 89560b566073785535643e694c112bedbd3db13d + src/mistralai_gcp/models/__init__.py: + id: 9a7b2a1f0dba + last_write_checksum: sha1:54654df1aecc8d4f634ebd4dbcb0fed16da80309 + pristine_git_object: fe85b133a3a7652cfcfd3b44074be3729c8a9b7b + src/mistralai_gcp/models/assistantmessage.py: + id: 0779dd85c548 + last_write_checksum: sha1:ccf5d6a93bf007d47f0415320afb047278e10172 + pristine_git_object: 17d740b6eeb433b2865a652d1dd760227ad38191 + src/mistralai_gcp/models/chatcompletionchoice.py: + id: b5843c853153 + last_write_checksum: sha1:05894fb4e252dbf6a94c43a3042f7ec53f3e46a0 + pristine_git_object: 06c955ccba9e1956fad222b9f59d8d6aa7d4f9a5 + src/mistralai_gcp/models/chatcompletionrequest.py: + id: 42d6cdf4646f + last_write_checksum: 
sha1:178150840664484614e19fbaa07ccaf655178990 + pristine_git_object: d693e3c33d25481b2a1617898bc4dee818d120e9 + src/mistralai_gcp/models/chatcompletionresponse.py: + id: 14720f23411e + last_write_checksum: sha1:46f14c3e00d21e9f01756f111d353768ad939494 + pristine_git_object: a7953eb156cc8185d70f92df8a75a2ebb77840b9 + src/mistralai_gcp/models/chatcompletionstreamrequest.py: + id: 2e17680adc7e + last_write_checksum: sha1:4aa2f8ec9c2331012225fe0810e27d9faab694df + pristine_git_object: c2d2512813f97283d1f158efa89217fa497eea93 + src/mistralai_gcp/models/completionchunk.py: + id: 7fa670acf4b8 + last_write_checksum: sha1:0d0fdb8efda7f0b6a8ff376b7da94cac8060e4e2 + pristine_git_object: ca002f52239f69b96dd967b5e91cb4ed544e51d0 + src/mistralai_gcp/models/completionevent.py: + id: c25e6676e263 + last_write_checksum: sha1:528f13beedc9befc6fb71d4f9f2a2d4ff5e91817 + pristine_git_object: 33278c119c62205b8d9b09297066dc61c2a86cd1 + src/mistralai_gcp/models/completionresponsestreamchoice.py: + id: 46946832a23e + last_write_checksum: sha1:15ede6989e929283ac713f75a2adbd6a903d614e + pristine_git_object: c90a68c9137e382ad85a2ed72765dfcaf24e6453 + src/mistralai_gcp/models/contentchunk.py: + id: 96dd7160dff7 + last_write_checksum: sha1:484722b90615ca7af20993c570de79fe990a50f2 + pristine_git_object: da5671e348d363927af77188da6af07240398826 + src/mistralai_gcp/models/deltamessage.py: + id: db6c3c4d3384 + last_write_checksum: sha1:e596610fa0dd100203cd7e515750782bfbdb0445 + pristine_git_object: 1801ac76522df2efc362712d46262aeba95abc87 + src/mistralai_gcp/models/fimcompletionrequest.py: + id: ed8593c435af + last_write_checksum: sha1:52c89daf22c7fc6106a32f5b37a9006c053bcadb + pristine_git_object: 12af226c7d6991f92cc7c56176ec3db773b5268f + src/mistralai_gcp/models/fimcompletionresponse.py: + id: 5f85a7cdb5fd + last_write_checksum: sha1:3ac2057157c7d1cb1bfc81fca2915ba72546f968 + pristine_git_object: e1940b0a2290fc3f9afcbd9e945397b1b90660ec + src/mistralai_gcp/models/fimcompletionstreamrequest.py: + id: f17c4f8fa580 + last_write_checksum: sha1:5729d943ce173ad65bc0d2cd1b6376c5eeda7116 + pristine_git_object: ba7a66d28aae921a4462043d578bbdd6c4b6b784 + src/mistralai_gcp/models/function.py: + id: 4612d6f83b9a + last_write_checksum: sha1:7692ea8102475e4d82d83722a8aea1efde668774 + pristine_git_object: 7ad1ae645f99ab13c022c92e7733ff4b15d39cac + src/mistralai_gcp/models/functioncall.py: + id: a3ca765a9368 + last_write_checksum: sha1:e044de5b26b15d46dce8ad8bd0d13bdf3d24ef7d + pristine_git_object: 99554c8862922184a05074bf01f71fbe20ac8fea + src/mistralai_gcp/models/functionname.py: + id: f97eb2c1bae3 + last_write_checksum: sha1:6343e5b4f724db6088c2055b058a9ebdd9bda995 + pristine_git_object: 00ec22f5ca6ff2d68d5cce2a020846a672ab0a1b + src/mistralai_gcp/models/httpvalidationerror.py: + id: f1ac6b7c81f3 + last_write_checksum: sha1:8e98e27a5440e2e1dbe330d1c889d43919d90b51 + pristine_git_object: 79609351e675148ef074988bb6ea8a11b81087dc + src/mistralai_gcp/models/imageurl.py: + id: 1668e9d55730 + last_write_checksum: sha1:2b8eaac00c956beb87434f8d5a21dff12611c788 + pristine_git_object: 20d4ba7719a6c04d2c7864459a68cca808e1a3f2 + src/mistralai_gcp/models/imageurlchunk.py: + id: ebc4dfed0347 + last_write_checksum: sha1:5c625584449139a410138c9986323d1f86b52735 + pristine_git_object: ddb53f21a13aeed7884e213e92752de1870d9fb5 + src/mistralai_gcp/models/jsonschema.py: + id: 4c32e4fa593e + last_write_checksum: sha1:3c972f731f2bd92262ea04a65771c093254d3a5f + pristine_git_object: 26914b2f8562da07e2d54d68a5806bedd32ec16a + 
src/mistralai_gcp/models/mistralgcperror.py: + id: 690cf29f596b + last_write_checksum: sha1:0ec55c68e3daccf2aba3c52f0a7c77ad5102f4c9 + pristine_git_object: fec729a590b2ea981e01f4af99d8b36ba52b4664 + src/mistralai_gcp/models/mistralpromptmode.py: + id: d2ba58ed5184 + last_write_checksum: sha1:8518548e80dcd8798ee72c2557c473327ba9289b + pristine_git_object: 1440f6ea9d18139ce5f10eb38d951b0995f74a20 + src/mistralai_gcp/models/no_response_error.py: + id: 7a773ba0687f + last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f + pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 + src/mistralai_gcp/models/prediction.py: + id: cd3b43190e22 + last_write_checksum: sha1:a0411a8e3b1ecb42b91405dd9ee2a2ee5f3fad59 + pristine_git_object: 36c87ab046ed9f1a28a371fbdc5c7d584d71b6d3 + src/mistralai_gcp/models/referencechunk.py: + id: ee00a52fb6dd + last_write_checksum: sha1:d0c05b6b1e7d085833d4a9ef85f1e0088c86d3a5 + pristine_git_object: 904e8b8250570371e2b59895196986a45e6d3562 + src/mistralai_gcp/models/responseformat.py: + id: ad17dac36a51 + last_write_checksum: sha1:296d4b52f934c48490b71d85e1e9d0e207cee21a + pristine_git_object: 9fe5116ca46d713f5f23c92ec1de8a73c5124408 + src/mistralai_gcp/models/responseformats.py: + id: deb9c36c5ec5 + last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b + pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 + src/mistralai_gcp/models/responsevalidationerror.py: + id: 78e210042d35 + last_write_checksum: sha1:b8ba70238453017393e721c7d61b5f1e268d7c17 + pristine_git_object: ebd4f214747d451dc2733d6ea838c67bb0c84797 + src/mistralai_gcp/models/sdkerror.py: + id: beed68eccaa1 + last_write_checksum: sha1:a058f2519ec22f72457e800600be469f13ff9915 + pristine_git_object: 7f53bbcd548d15f4fdd529bd3caea5249eb5e8e6 + src/mistralai_gcp/models/security.py: + id: 32f877bd1399 + last_write_checksum: sha1:7bad1150440143f9f6faefe33911edf6c2afdec1 + pristine_git_object: 3857494264c0444d330c54570483710a5ed321f0 + src/mistralai_gcp/models/systemmessage.py: + id: 13826cd6cb74 + last_write_checksum: sha1:876e84816c4e27ad77d6313777ba841ea3086cf9 + pristine_git_object: d74bdf3255bac53335eea08a6010cf1cc19380dd + src/mistralai_gcp/models/systemmessagecontentchunks.py: + id: 8233735d37db + last_write_checksum: sha1:38fedfdb83824054a1734bcc7d39e7e040bf4792 + pristine_git_object: e0b5bbc30828cbf572e603efc86ee2695102ea31 + src/mistralai_gcp/models/textchunk.py: + id: a330626b2006 + last_write_checksum: sha1:b801cf9b1913a70841c8fbdc9d433f0380ea82d8 + pristine_git_object: c4a8cf28cd2281cfda40cefa70ce1bd64d3e750d + src/mistralai_gcp/models/thinkchunk.py: + id: c38f6a213cc1 + last_write_checksum: sha1:a072f3bf01c2dc90ef6cc1b188b2e00e15923e07 + pristine_git_object: b88c0cb54c6926b3c896b3c192c5f3c51c676a51 + src/mistralai_gcp/models/tool.py: + id: 86b94d6a3bcb + last_write_checksum: sha1:e90d1a8859bf737e7216527dd83fcacf7d2b0a8f + pristine_git_object: a1d477da51b1c65e0d5bd6aeac6dc5fb1441a1b4 + src/mistralai_gcp/models/toolcall.py: + id: 3047e78c2ac3 + last_write_checksum: sha1:0007cec80ddaed1a0d53910cbf9f212d49f14a99 + pristine_git_object: ecbac8d6eace38ed4045888c56e9e2ba82bfaabc + src/mistralai_gcp/models/toolchoice.py: + id: 1f3d5233426e + last_write_checksum: sha1:167e09e870d44fa0dbaea61b651cb866afc4fd30 + pristine_git_object: dc213e622a4a6d9e3cc097e6b7c7196ff72db873 + src/mistralai_gcp/models/toolchoiceenum.py: + id: b4431b9cf3fd + last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 + pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 + 
src/mistralai_gcp/models/toolmessage.py: + id: e21a2326eb31 + last_write_checksum: sha1:c332f72e272fff7970f52e2b15223a2898ce9b15 + pristine_git_object: d6aa2621b83bde261fab7bd15f58273861f88738 + src/mistralai_gcp/models/tooltypes.py: + id: b4c1716d51b3 + last_write_checksum: sha1:0f8fe0c437736eb584cce298a5e72c4e25f7c42b + pristine_git_object: 8b812ae0cfee81a1cd8ab0180e65f57d19a0dcbd + src/mistralai_gcp/models/usageinfo.py: + id: 574d1999c265 + last_write_checksum: sha1:a0a88fe5b3cae9317781b99cb3cc1916a9ba17cc + pristine_git_object: 59f36158761c3a86900256a6ed73845c455417c7 + src/mistralai_gcp/models/usermessage.py: + id: cf3691ffafa6 + last_write_checksum: sha1:d0ed86a67403d65ed6ac7a31aa5f73e19ecfa670 + pristine_git_object: 0168b45235bc891888c095565af832535dd26139 + src/mistralai_gcp/models/validationerror.py: + id: f1a6468621bd + last_write_checksum: sha1:a4cc5969f12e00be3506edc90ec21a01d5415eff + pristine_git_object: 033d4b63d1c321ae2c49e8684b34817adddca4c2 + src/mistralai_gcp/py.typed: + id: 7f25f97fed44 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + src/mistralai_gcp/sdk.py: + id: 491d22534344 + last_write_checksum: sha1:82db0216d5a051dc0a59a4c5633f30be95bab024 + pristine_git_object: a5871bfc8f1719afce684ff0c51d73d284f4dd3d + src/mistralai_gcp/sdkconfiguration.py: + id: 84fd7d3e219a + last_write_checksum: sha1:df51450c87f807c849e2aefb0a154aa4426fd8e3 + pristine_git_object: cf85c47e5e33956a64ddea53d85cdb7cc4bb687e + src/mistralai_gcp/types/__init__.py: + id: 15a92fdbd0a1 + last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed + pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c + src/mistralai_gcp/types/basemodel.py: + id: 0dd6dc277359 + last_write_checksum: sha1:615d0b364fa924b0fef719958df34596cc7c1ae2 + pristine_git_object: 231c2e37283a76082f1a064c7aae47f8ee4ee694 + src/mistralai_gcp/utils/__init__.py: + id: bb44726e5fa4 + last_write_checksum: sha1:81e0385b93362e0f3f6911b65bd4cc601ebc11e1 + pristine_git_object: 56164cf3a86399ee7a8e1a68d19fb494689d77c3 + src/mistralai_gcp/utils/annotations.py: + id: aeecca0c40a3 + last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc + pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 + src/mistralai_gcp/utils/datetimes.py: + id: e3e3bb6cb264 + last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 + pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 + src/mistralai_gcp/utils/enums.py: + id: 9f020fc8d361 + last_write_checksum: sha1:786ba597f79dca6fbc0d87c591752bb8d775ecb7 + pristine_git_object: c3bc13cfc48794c143a64667f02e7949a8ce3fcc + src/mistralai_gcp/utils/eventstreaming.py: + id: d570df9074cf + last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b + pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc + src/mistralai_gcp/utils/forms.py: + id: fe642748c385 + last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 + pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 + src/mistralai_gcp/utils/headers.py: + id: 0cb933d098ed + last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 + pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a + src/mistralai_gcp/utils/logger.py: + id: 2992f9bda9c7 + last_write_checksum: sha1:f3fdb154a3f09b8cc43d74c7e9c02f899f8086e4 + pristine_git_object: b661aff65d38b77d035149699aea09b2785d2fc6 + src/mistralai_gcp/utils/metadata.py: + id: af274ae68c93 + last_write_checksum: 
sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 + pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d + src/mistralai_gcp/utils/queryparams.py: + id: b20aa8da5982 + last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 + pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 + src/mistralai_gcp/utils/requestbodies.py: + id: 1a2ddaa8f5a2 + last_write_checksum: sha1:e0a3a78158eba39880475d62d61be906625676b8 + pristine_git_object: d5240dd5f5efffabbd9aefa2f4a349511a9c75b4 + src/mistralai_gcp/utils/retries.py: + id: 8caeba1fe4ab + last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 + pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 + src/mistralai_gcp/utils/security.py: + id: fa4f52aaad5d + last_write_checksum: sha1:a17130ace2c0db6394f38dd941ad2b700cc755c8 + pristine_git_object: 295a3f40031dbb40073ad227fd4a355660f97ab2 + src/mistralai_gcp/utils/serializers.py: + id: 920ccb5c87f2 + last_write_checksum: sha1:a0d184ace7371a14a7d005cca7f358a03e3d4b07 + pristine_git_object: 378a14c0f86a867ca7b0eb7e620da82234c0ccc4 + src/mistralai_gcp/utils/unmarshal_json_response.py: + id: 65d5fa644cf8 + last_write_checksum: sha1:877dd4bb58700039a481fdf7d7216d2d9a0b3e92 + pristine_git_object: c168a293f7018fc3b83cac0d8f723475e5f05631 + src/mistralai_gcp/utils/url.py: + id: 116eb5a78ca7 + last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 + pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 + src/mistralai_gcp/utils/values.py: + id: 9cc9ee47c951 + last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 + pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 +examples: + stream_chat: + speakeasy-default-stream-chat: + requestBody: + application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + responses: + "422": + application/json: {} + chat_completion_v1_chat_completions_post: + speakeasy-default-chat-completion-v1-chat-completions-post: + requestBody: + application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} + "422": + application/json: {} + stream_fim: + speakeasy-default-stream-fim: + requestBody: + application/json: {"model": "codestral-latest", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} + responses: + "422": + application/json: {} + fim_completion_v1_fim_completions_post: + userExample: + requestBody: + application/json: {"model": "codestral-latest", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} + responses: + "200": + application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. 
The function should be implemented in a\n way that it can handle very large integers (up to 10^18). As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false, "role": "assistant"}, "finish_reason": "stop"}]}
+examplesVersion: 1.0.2
+generatedTests: {}
 generatedFiles:
   - .gitattributes
   - .python-version
@@ -172,40 +803,3 @@ generatedFiles:
   - src/mistralai_gcp/utils/serializers.py
   - src/mistralai_gcp/utils/url.py
   - src/mistralai_gcp/utils/values.py
-examples:
-  stream_chat:
-    speakeasy-default-stream-chat:
-      requestBody:
-        application/json: {"model": "mistral-small-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]}
-      responses:
-        "422":
-          application/json: {}
-        "200": {}
-  chat_completion_v1_chat_completions_post:
-    speakeasy-default-chat-completion-v1-chat-completions-post:
-      requestBody:
-        application/json: {"model": "mistral-small-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]}
-      responses:
-        "200":
-          application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]}
-        "422":
-          application/json: {}
-  stream_fim:
-    speakeasy-default-stream-fim:
-      requestBody:
-        application/json: {"model": "codestral-2405", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"}
-      responses:
-        "422":
-          application/json: {}
-        "200": {}
-  fim_completion_v1_fim_completions_post:
-    speakeasy-default-fim-completion-v1-fim-completions-post:
-      requestBody:
-        application/json: {"model": "codestral-2405", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"}
-      responses:
-        "200":
-          application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]}
-        "422":
-          application/json: {}
-examplesVersion: 1.0.0
-generatedTests: {}
diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml
index d7be7fed..82d625bb 100644
--- a/packages/mistralai_gcp/.speakeasy/gen.yaml
+++ b/packages/mistralai_gcp/.speakeasy/gen.yaml
@@ -4,6 +4,7 @@ generation:
   maintainOpenAPIOrder: true
   usageSnippets:
     optionalPropertyRendering: withExample
+    sdkInitStyle: constructor
   useClassNamesForArrayFields: true
   fixes:
     nameResolutionDec2023: true
@@ -11,11 +12,21 @@ generation:
     parameterOrderingFeb2024: true
     requestResponseComponentNamesFeb2024: true
     securityFeb2025: false
+    sharedErrorComponentsApr2025: false
   auth:
     oAuth2ClientCredentialsEnabled: true
     oAuth2PasswordEnabled: false
+  hoistGlobalSecurity: true
+  schemas:
+    allOfMergeStrategy: shallowMerge
+    requestBodyFieldName: ""
+  persistentEdits: {}
+  tests:
+    generateTests: true
+    generateNewTests: false
+    skipResponseBodyAssertions: false
 python:
-  version: 1.6.0
+  version: 2.0.1
   additionalDependencies:
     dev:
       pytest: ^8.2.2
@@ -23,9 +34,15 @@ python:
     main:
       google-auth: ^2.31.0
       requests: ^2.32.3
+  allowedRedefinedBuiltins:
+    - id
+    - object
+  asyncMode: both
   authors:
     - Mistral
+  baseErrorName: MistralGcpError
   clientServerStatusCodesAsErrors: true
+  constFieldCasing: upper
   defaultErrorName: SDKError
   description: Python Client SDK for the Mistral AI API in GCP.
   enableCustomCodeRegions: false
@@ -43,11 +60,19 @@ python:
       operations: ""
       shared: ""
       webhooks: ""
+  inferUnionDiscriminators: true
   inputModelSuffix: input
+  license: ""
   maxMethodParams: 15
   methodArguments: infer-optional-args
+  moduleName: ""
+  multipartArrayFormat: legacy
   outputModelSuffix: output
+  packageManager: uv
   packageName: mistralai-gcp
+  preApplyUnionDiscriminators: false
+  pytestFilterWarnings: []
   pytestTimeout: 0
   responseFormat: flat
+  sseFlatResponse: false
   templateVersion: v2
diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md
index 9d735d08..48103e30 100644
--- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md
+++ b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md
@@ -3,21 +3,22 @@
 
 ## Fields
 
-| Field | Type | Required | Description | Example |
-| ----- | ---- | -------- | ----------- | ------- |
-| `model` | *str* | :heavy_check_mark: | ID of the model to use.<br/>You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
-| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [<br/>{<br/>"role": "user",<br/>"content": "Who is the best French painter? Answer in one short sentence."<br/>}<br/>] |
-| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | |
-| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
-| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
-| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | |
-| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | |
-| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | |
-| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | |
-| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | |
-| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | |
-| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | |
-| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | |
-| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once.<br/> | |
-| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | |
-| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | |
\ No newline at end of file
+| Field | Type | Required | Description | Example |
+| ----- | ---- | -------- | ----------- | ------- |
+| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest |
+| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.<br/> | |
+| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
+| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
+| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | |
+| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | |
+| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | |
+| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [<br/>{<br/>"role": "user",<br/>"content": "Who is the best French painter? Answer in one short sentence."<br/>}<br/>] |
+| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {<br/>"type": "text"<br/>} |
+| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | |
+| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | |
+| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | |
+| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | |
+| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | |
+| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | |
+| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. | |
+| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | |
\ No newline at end of file
diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md b/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md
index 1646528d..dc82a8ef 100644
--- a/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md
+++ b/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md
@@ -1,5 +1,7 @@
 # ChatCompletionRequestToolChoice
 
+Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.
+
 
 ## Supported Types
 
diff --git a/packages/mistralai_gcp/docs/models/chatcompletionresponse.md b/packages/mistralai_gcp/docs/models/chatcompletionresponse.md
index ad376158..a0465ffb 100644
--- a/packages/mistralai_gcp/docs/models/chatcompletionresponse.md
+++ b/packages/mistralai_gcp/docs/models/chatcompletionresponse.md
@@ -9,5 +9,5 @@
 | `object` | *str* | :heavy_check_mark: | N/A | chat.completion |
 | `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest |
 | `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | |
-| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 |
-| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | |
\ No newline at end of file
+| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 |
+| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | |
\ No newline at end of file
diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md
index 827943cd..aaacc09c 100644
--- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md
+++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md
@@ -3,21 +3,22 @@
 
 ## Fields
 
-| Field | Type | Required | Description | Example |
-| ----- | ---- | -------- | ----------- | ------- |
-| `model` | *str* | :heavy_check_mark: | ID of the model to use.<br/>You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest |
-| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [<br/>{<br/>"role": "user",<br/>"content": "Who is the best French painter? Answer in one short sentence."<br/>}<br/>] |
-| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | |
-| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
-| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
-| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | |
-| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | |
-| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | |
-| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | |
-| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | |
-| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | |
-| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | |
-| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | |
-| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once.<br/> | |
-| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | |
-| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | |
\ No newline at end of file
+| Field | Type | Required | Description | Example |
+| ----- | ---- | -------- | ----------- | ------- |
+| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest |
+| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.<br/> | |
+| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
+| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
+| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | |
+| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | |
+| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | |
+| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{ "role": "user", "content": "Who is the best French painter? Answer in one short sentence." }
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md index cce0ca3e..43f3ca38 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -1,5 +1,7 @@ # ChatCompletionStreamRequestToolChoice +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
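To make the forced-tool behavior described above concrete, here is a minimal sketch, not an official example from this package: the `MistralGoogleCloud` client and its `region`/`project_id` arguments are assumptions based on the package README, and `get_weather` is a hypothetical function.

```python
# Sketch: forcing a specific tool call (client construction assumed; get_weather is hypothetical).
from mistralai_gcp import MistralGoogleCloud

client = MistralGoogleCloud(region="europe-west4", project_id="my-project")

res = client.chat.complete(
    model="mistral-large-latest",
    messages=[{"role": "user", "content": "What is the weather in Paris?"}],
    tools=[
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Look up the current weather for a city.",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ],
    # A {"type": "function", "function": {"name": ...}} value forces this tool;
    # "auto", "none", "any", or "required" steer the general behavior instead.
    tool_choice={"type": "function", "function": {"name": "get_weather"}},
)
print(res.choices[0].message.tool_calls)
```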
+ ## Supported Types diff --git a/packages/mistralai_gcp/docs/models/completionchunk.md b/packages/mistralai_gcp/docs/models/completionchunk.md index b8ae6a09..7f8ab5e6 100644 --- a/packages/mistralai_gcp/docs/models/completionchunk.md +++ b/packages/mistralai_gcp/docs/models/completionchunk.md @@ -6,8 +6,8 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | | `id` | *str* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | -| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | | `object` | *Optional[str]* | :heavy_minus_sign: | N/A | | `created` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `model` | *str* | :heavy_check_mark: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md index 7507b90c..380f109c 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md @@ -5,13 +5,13 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. 
| | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionresponse.md b/packages/mistralai_gcp/docs/models/fimcompletionresponse.md index da786a1f..cd62d034 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionresponse.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionresponse.md @@ -9,5 +9,5 @@ | `object` | *str* | :heavy_check_mark: | N/A | chat.completion | | `model` | *str* | :heavy_check_mark: | N/A | codestral-latest | | `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | -| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | -| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md index 6cc439c7..a890ff2b 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md @@ -5,13 +5,13 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. 
| | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/function.md b/packages/mistralai_gcp/docs/models/function.md index a166b7bb..b2bdb3fe 100644 --- a/packages/mistralai_gcp/docs/models/function.md +++ b/packages/mistralai_gcp/docs/models/function.md @@ -6,6 +6,6 @@ | Field | Type | Required | Description | | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | -| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `description` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/jsonschema.md b/packages/mistralai_gcp/docs/models/jsonschema.md index ae387867..7ff7c070 100644 --- a/packages/mistralai_gcp/docs/models/jsonschema.md +++ b/packages/mistralai_gcp/docs/models/jsonschema.md @@ -6,6 +6,6 @@ | Field | Type | Required | Description | | ----------------------- | ----------------------- | ----------------------- | ----------------------- | | `name` | *str* | :heavy_check_mark: | N/A | -| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/prediction.md b/packages/mistralai_gcp/docs/models/prediction.md index 86e9c396..fae3c1ca 100644 --- a/packages/mistralai_gcp/docs/models/prediction.md +++ b/packages/mistralai_gcp/docs/models/prediction.md @@ -1,5 +1,7 @@ # Prediction +Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + ## Fields diff --git a/packages/mistralai_gcp/docs/models/responseformat.md b/packages/mistralai_gcp/docs/models/responseformat.md index 23a1641b..5cab22f2 100644 --- a/packages/mistralai_gcp/docs/models/responseformat.md +++ b/packages/mistralai_gcp/docs/models/responseformat.md @@ -1,9 +1,11 @@ # ResponseFormat +Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. 
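The JSON schema mode documented here can be exercised roughly as follows; a minimal sketch assuming the `MistralGoogleCloud` client from the package README, with an illustrative schema. The `schema_definition` key mirrors the field listed in `docs/models/jsonschema.md` above.

```python
# Sketch: structured output via the new JSON schema mode (client construction assumed).
from mistralai_gcp import MistralGoogleCloud

client = MistralGoogleCloud(region="europe-west4", project_id="my-project")

res = client.chat.complete(
    model="mistral-large-latest",
    messages=[{"role": "user", "content": "Extract the city and country: 'Nice is in France.'"}],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "location",
            # Python field name per docs/models/jsonschema.md; the wire name may differ.
            "schema_definition": {
                "type": "object",
                "properties": {
                    "city": {"type": "string"},
                    "country": {"type": "string"},
                },
                "required": ["city", "country"],
            },
            "strict": True,
        },
    },
)
print(res.choices[0].message.content)  # JSON that follows the provided schema
```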
+ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | -| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformats.md b/packages/mistralai_gcp/docs/models/responseformats.md index 06886afe..2f5f1e55 100644 --- a/packages/mistralai_gcp/docs/models/responseformats.md +++ b/packages/mistralai_gcp/docs/models/responseformats.md @@ -1,7 +1,5 @@ # ResponseFormats -An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
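Plain JSON mode carries the caveat stated above: the prompt itself must also ask for JSON. A short sketch of that, under the same assumed client construction as the previous example:

```python
# Sketch: JSON mode requires explicitly requesting JSON in a system or user message.
from mistralai_gcp import MistralGoogleCloud

client = MistralGoogleCloud(region="europe-west4", project_id="my-project")

res = client.chat.complete(
    model="mistral-large-latest",
    messages=[
        # Per the description above, this instruction is required when using JSON mode.
        {"role": "system", "content": "Reply only with a JSON object."},
        {"role": "user", "content": "List three French painters with their birth years."},
    ],
    response_format={"type": "json_object"},
)
print(res.choices[0].message.content)
```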
- ## Values diff --git a/packages/mistralai_gcp/docs/models/systemmessagecontent.md b/packages/mistralai_gcp/docs/models/systemmessagecontent.md index e0d27d9f..0c87baf3 100644 --- a/packages/mistralai_gcp/docs/models/systemmessagecontent.md +++ b/packages/mistralai_gcp/docs/models/systemmessagecontent.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.TextChunk]` +### `List[models.SystemMessageContentChunks]` ```python -value: List[models.TextChunk] = /* values here */ +value: List[models.SystemMessageContentChunks] = /* values here */ ``` diff --git a/packages/mistralai_gcp/docs/models/tool.md b/packages/mistralai_gcp/docs/models/tool.md index 822f86f8..fb661f72 100644 --- a/packages/mistralai_gcp/docs/models/tool.md +++ b/packages/mistralai_gcp/docs/models/tool.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolcall.md b/packages/mistralai_gcp/docs/models/toolcall.md index 574be1ea..3819236b 100644 --- a/packages/mistralai_gcp/docs/models/toolcall.md +++ b/packages/mistralai_gcp/docs/models/toolcall.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolchoice.md b/packages/mistralai_gcp/docs/models/toolchoice.md index 792ebcd6..373046bb 100644 --- a/packages/mistralai_gcp/docs/models/toolchoice.md +++ b/packages/mistralai_gcp/docs/models/toolchoice.md @@ -7,5 +7,5 @@ ToolChoice is either a ToolChoiceEnum or a ToolChoice | Field | Type | Required | Description | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | 
[models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usageinfo.md b/packages/mistralai_gcp/docs/models/usageinfo.md index 9f56a3ae..f5204ac9 100644 --- a/packages/mistralai_gcp/docs/models/usageinfo.md +++ b/packages/mistralai_gcp/docs/models/usageinfo.md @@ -3,8 +3,10 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | -| `prompt_tokens` | *int* | :heavy_check_mark: | N/A | 16 | -| `completion_tokens` | *int* | :heavy_check_mark: | N/A | 34 | -| `total_tokens` | *int* | :heavy_check_mark: | N/A | 50 | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `prompt_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `completion_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `total_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `prompt_audio_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/pylintrc b/packages/mistralai_gcp/pylintrc index c80721af..a8fcb932 100644 --- a/packages/mistralai_gcp/pylintrc +++ b/packages/mistralai_gcp/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.10 +py-version=3.9 # Discover python modules and packages in the file system subtree. recursive=no @@ -188,8 +188,8 @@ good-names=i, Run, _, e, - n, - id + id, + n # Good variable names regexes, separated by a comma. If names match any regex, # they will always be accepted @@ -458,7 +458,8 @@ disable=raw-checker-failed, relative-beyond-top-level, consider-using-with, wildcard-import, - unused-wildcard-import + unused-wildcard-import, + too-many-return-statements # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option @@ -659,4 +660,4 @@ init-import=no # List of qualified module names which can have objects that can redefine # builtins. -redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io \ No newline at end of file diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 79b8193b..65318b2b 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,57 +1,45 @@ [project] name = "mistralai-gcp" -version = "1.6.0" +version = "2.0.1" description = "Python Client SDK for the Mistral AI API in GCP." 
-authors = [{ name = "Mistral" }] -requires-python = ">=3.10" +authors = [{ name = "Mistral" },] readme = "README.md" +requires-python = ">=3.9.2" dependencies = [ - "eval-type-backport >=0.2.0", "google-auth (>=2.31.0,<3.0.0)", + "httpcore >=1.0.9", "httpx >=0.28.1", - "pydantic >=2.10.3", - "python-dateutil >=2.8.2", + "pydantic >=2.11.2", "requests (>=2.32.3,<3.0.0)", - "typing-inspection >=0.4.0", ] [dependency-groups] dev = [ - "mypy==1.14.1", - "pylint==3.2.3", - "pytest>=8.2.2,<9", - "pytest-asyncio>=0.23.7,<0.24", - "types-python-dateutil>=2.9.0.20240316,<3", + "mypy ==1.15.0", + "pylint ==3.2.3", + "pyright ==1.1.398", + "pytest (>=8.2.2,<9.0.0)", + "pytest-asyncio (>=0.23.7,<0.24.0)", ] -[tool.setuptools.package-data] -"*" = ["py.typed", "src/mistralai_gcp/py.typed"] - -[tool.hatch.build.targets.sdist] -include = ["src/mistralai_gcp"] - -[tool.hatch.build.targets.sdist.force-include] -"py.typed" = "py.typed" -"src/mistralai_gcp/py.typed" = "src/mistralai_gcp/py.typed" - -[tool.hatch.build.targets.wheel] -include = ["src/mistralai_gcp"] +[tool.setuptools.packages.find] +where = ["src"] -[tool.hatch.build.targets.wheel.sources] -"src/mistralai_gcp" = "mistralai_gcp" - -[virtualenvs] -in-project = true +[tool.setuptools.package-data] +"*" = ["py.typed"] [build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" +requires = ["setuptools>=80", "wheel"] +build-backend = "setuptools.build_meta" [tool.pytest.ini_options] +asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] disable_error_code = "misc" +explicit_package_bases = true +mypy_path = "src" [[tool.mypy.overrides]] module = "typing_inspect" @@ -64,3 +52,5 @@ ignore_missing_imports = true [tool.pyright] venvPath = "." venv = ".venv" + + diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/mistralai_gcp/scripts/publish.sh index e9eb1f0b..ef28dc10 100755 --- a/packages/mistralai_gcp/scripts/publish.sh +++ b/packages/mistralai_gcp/scripts/publish.sh @@ -1,6 +1,4 @@ #!/usr/bin/env bash -export UV_PUBLISH_TOKEN=${PYPI_TOKEN} - -uv run python ../../scripts/prepare_readme.py --repo-subdir packages/mistralai_gcp -- uv build -uv publish +uv build +uv publish --token $PYPI_TOKEN diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py index bb867b5b..f8088f4c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py @@ -3,10 +3,12 @@ from abc import ABC, abstractmethod import httpx from mistralai_gcp.httpclient import HttpClient +from mistralai_gcp.sdkconfiguration import SDKConfiguration from typing import Any, Callable, List, Optional, Tuple, Union class HookContext: + config: SDKConfiguration base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None @@ -14,11 +16,13 @@ class HookContext: def __init__( self, + config: SDKConfiguration, base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): + self.config = config self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes @@ -28,6 +32,7 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, @@ -38,6 +43,7 @@ def __init__(self, hook_ctx: HookContext): class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: 
HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, @@ -48,6 +54,7 @@ def __init__(self, hook_ctx: HookContext): class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/mistralai_gcp/src/mistralai_gcp/_version.py index 11f38b63..17d5ca64 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_version.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-gcp" -__version__: str = "1.6.0" -__openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai-gcp" +__version__: str = "2.0.1" +__openapi_doc_version__: str = "1.0.0" +__gen_version__: str = "2.789.5" +__user_agent__: str = "speakeasy-sdk/python 2.0.1 2.789.5 1.0.0 mistralai-gcp" try: if __package__ is not None: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py index bb0aab96..7a93de23 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py @@ -15,9 +15,19 @@ class BaseSDK: sdk_configuration: SDKConfiguration + parent_ref: Optional[object] = None + """ + Reference to the root SDK instance, if any. This will prevent it from + being garbage collected while there are active streams. + """ - def __init__(self, sdk_config: SDKConfiguration) -> None: + def __init__( + self, + sdk_config: SDKConfiguration, + parent_ref: Optional[object] = None, + ) -> None: self.sdk_configuration = sdk_config + self.parent_ref = parent_ref def _get_url(self, base_url, url_variables): sdk_url, sdk_variables = self.sdk_configuration.get_server_details() @@ -50,6 +60,7 @@ def _build_request_async( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: client = self.sdk_configuration.async_client return self._build_request_with_client( @@ -70,6 +81,7 @@ def _build_request_async( get_serialized_body, url_override, http_headers, + allow_empty_value, ) def _build_request( @@ -92,6 +104,7 @@ def _build_request( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: client = self.sdk_configuration.client return self._build_request_with_client( @@ -112,6 +125,7 @@ def _build_request( get_serialized_body, url_override, http_headers, + allow_empty_value, ) def _build_request_with_client( @@ -135,6 +149,7 @@ def _build_request_with_client( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: query_params = {} @@ -150,6 +165,7 @@ def _build_request_with_client( query_params = utils.get_query_params( request if request_has_query_params else None, _globals if request_has_query_params else None, + allow_empty_value, ) else: # Pick up the query parameter from the override so they can be @@ -218,12 +234,12 @@ def do_request( client = self.sdk_configuration.client logger = self.sdk_configuration.debug_logger + hooks = self.sdk_configuration.__dict__["_hooks"] + def do(): http_res = None try: - 
req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -237,16 +253,14 @@ def do(): http_res = client.send(req, stream=stream) except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) if e is not None: logger.debug("Request Exception", exc_info=True) raise e if http_res is None: logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") + raise models.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -257,7 +271,7 @@ def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( + result, err = hooks.after_error( AfterErrorContext(hook_ctx), http_res, None ) if err is not None: @@ -267,7 +281,7 @@ def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") + raise models.SDKError("Unexpected error occurred", http_res) return http_res @@ -277,9 +291,7 @@ def do(): http_res = do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) return http_res @@ -294,12 +306,12 @@ async def do_request_async( client = self.sdk_configuration.async_client logger = self.sdk_configuration.debug_logger + hooks = self.sdk_configuration.__dict__["_hooks"] + async def do(): http_res = None try: - req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -313,16 +325,14 @@ async def do(): http_res = await client.send(req, stream=stream) except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) if e is not None: logger.debug("Request Exception", exc_info=True) raise e if http_res is None: logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") + raise models.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -333,7 +343,7 @@ async def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( + result, err = hooks.after_error( AfterErrorContext(hook_ctx), http_res, None ) if err is not None: @@ -343,7 +353,7 @@ async def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") + raise models.SDKError("Unexpected error occurred", http_res) return http_res @@ -355,8 +365,6 @@ async def do(): http_res = await do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) + http_res = 
hooks.after_success(AfterSuccessContext(hook_ctx), http_res) return http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index dba369bf..3f73b1ec 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -5,6 +5,7 @@ from mistralai_gcp._hooks import HookContext from mistralai_gcp.types import OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming +from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, List, Mapping, Optional, Union @@ -41,11 +42,12 @@ def stream( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: + ) -> eventstreaming.EventStream[models.CompletionEvent]: r"""Stream chat completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. @@ -58,14 +60,15 @@ def stream( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -104,6 +107,7 @@ def stream( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, ) req = self._build_request( @@ -122,6 +126,7 @@ def stream( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -135,9 +140,10 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -152,32 +158,23 @@ def stream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - 
http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -209,11 +206,12 @@ async def stream_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: r"""Stream chat completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. @@ -226,14 +224,15 @@ async def stream_async( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -272,6 +271,7 @@ async def stream_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, ) req = self._build_request_async( @@ -290,6 +290,7 @@ async def stream_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -303,9 +304,10 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -320,32 +322,23 @@ async def stream_async( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) def complete( self, @@ -385,11 +378,12 @@ def complete( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = 
None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ChatCompletionResponse]: + ) -> models.ChatCompletionResponse: r"""Chat Completion :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. @@ -400,14 +394,15 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. 
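# --- Illustrative sketch (not part of the generated change): JSON mode ---
# How a caller might exercise the newly documented `response_format` on
# `complete()`. The client class, constructor arguments, and model name are
# assumptions for this sketch; use whatever your installation exposes.
from mistralai_gcp import MistralGoogleCloud

client = MistralGoogleCloud(region="europe-west4", project_id="my-project")

res = client.chat.complete(
    model="mistral-large-2411",
    messages=[
        # JSON mode requires asking for JSON explicitly in a message.
        {"role": "system", "content": 'Reply as a JSON object {"answer": ...}.'},
        {"role": "user", "content": "What is the capital of France?"},
    ],
    response_format={"type": "json_object"},
)
# The return type is now ChatCompletionResponse rather than Optional,
# so no None check is needed before reading choices.
print(res.choices[0].message.content)
# --- end sketch ---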
- :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -448,6 +443,7 @@ def complete( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, ) req = self._build_request( @@ -466,6 +462,7 @@ def complete( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -479,9 +476,10 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -491,33 +489,20 @@ def complete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] - ) + return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -557,11 +542,12 @@ async def complete_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ChatCompletionResponse]: + ) -> models.ChatCompletionResponse: r"""Chat Completion :param model: ID of the model to use. 
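# --- Illustrative sketch (not part of the generated change): error paths ---
# The 422/4XX/5XX branches above now construct exceptions positionally as
# (message-or-data, http_res, [http_res_text]); callers still just catch the
# exception types. Reuses the `client` from the earlier sketch.
from mistralai_gcp import models

try:
    res = client.chat.complete(
        model="mistral-large-2411",
        messages=[{"role": "user", "content": "ping"}],
    )
except models.HTTPValidationError as e:
    # 422: the request body failed server-side validation.
    print("validation error:", e)
except models.SDKError as e:
    # Other 4XX/5XX responses, or an unexpected response shape.
    print("API error:", e)
# --- end sketch ---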
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. @@ -572,14 +558,15 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
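# --- Illustrative sketch (not part of the generated change): prompt_mode ---
# The new `prompt_mode` parameter is OptionalNullable with an UNSET default,
# which gives three distinct request states. The model name is a placeholder;
# `client` is reused from the earlier sketch.
msgs = [{"role": "user", "content": "Prove that sqrt(2) is irrational."}]

# Omitted -> UNSET: the field is left out of the request body entirely.
res = client.chat.complete(model="magistral-small-latest", messages=msgs)

# "reasoning" -> opt in to the reasoning system prompt.
res = client.chat.complete(
    model="magistral-small-latest", messages=msgs, prompt_mode="reasoning"
)

# Explicit None -> serialized as JSON null (no system prompt), via the
# nullable_fields handling this change adds to serialize_model.
res = client.chat.complete(
    model="magistral-small-latest", messages=msgs, prompt_mode=None
)
# --- end sketch ---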
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -620,6 +607,7 @@ async def complete_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, ) req = self._build_request_async( @@ -638,6 +626,7 @@ async def complete_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -651,9 +640,10 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -663,30 +653,17 @@ async def complete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] - ) + return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py index 84821c6a..0fb67274 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -5,6 +5,7 @@ from mistralai_gcp._hooks import HookContext from mistralai_gcp.types import OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming +from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Mapping, Optional, Union @@ -33,12 +34,12 @@ def stream( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: + ) -> eventstreaming.EventStream[models.CompletionEvent]: r"""Stream fim completion 
Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -92,6 +93,7 @@ def stream( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -105,9 +107,10 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_fim", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -122,32 +125,23 @@ def stream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -171,12 +165,12 @@ async def stream_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: 
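# --- Illustrative sketch (not part of the generated change): consuming a stream ---
# With Optional dropped from the streaming return types, the event stream can
# be iterated directly, no `if stream is not None:` guard. Field access follows
# the CompletionEvent/CompletionChunk models referenced in this diff; the model
# name is a placeholder and `client` is reused from the earlier sketch.
import asyncio

async def main():
    stream = await client.fim.stream_async(
        model="codestral-latest",
        prompt="def fibonacci(n):",
    )
    async for event in stream:
        # Each event wraps a CompletionChunk in its `data` field.
        delta = event.data.choices[0].delta.content
        if isinstance(delta, str):
            print(delta, end="")

asyncio.run(main())
# --- end sketch ---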
r"""Stream fim completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -230,6 +224,7 @@ async def stream_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -243,9 +238,10 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_fim", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -260,32 +256,23 @@ async def stream_async( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) def complete( self, @@ -309,12 +296,12 @@ def complete( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> 
Optional[models.FIMCompletionResponse]: + ) -> models.FIMCompletionResponse: r"""Fim Completion FIM completion. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -368,6 +355,7 @@ def complete( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -381,9 +369,10 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -393,33 +382,20 @@ def complete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.FIMCompletionResponse] - ) + return unmarshal_json_response(models.FIMCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -443,12 +419,12 @@ async def complete_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.FIMCompletionResponse]: + ) -> models.FIMCompletionResponse: r"""Fim Completion FIM completion. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. 
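# --- Illustrative sketch (not part of the generated change): FIM usage ---
# The docstring no longer hard-codes the codestral variants; "codestral-latest"
# below is just an example of a FIM-capable model, and `suffix` is the usual
# fill-in-the-middle counterpart to `prompt` in this API. `client` is reused
# from the earlier sketch.
res = client.fim.complete(
    model="codestral-latest",
    prompt="def is_even(n):\n    return ",
    suffix="\n\nprint(is_even(4))",
    temperature=0.2,
)
# Return type is now FIMCompletionResponse rather than Optional.
print(res.choices[0].message.content)
# --- end sketch ---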
:param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -502,6 +478,7 @@ async def complete_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -515,9 +492,10 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -527,30 +505,17 @@ async def complete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.FIMCompletionResponse] - ) + return unmarshal_json_response(models.FIMCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py index 1e426352..89560b56 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py @@ -2,7 +2,6 @@ # pyright: reportReturnType = false import asyncio -from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -108,7 +107,6 @@ def close_clients( # to them from the owning SDK instance and they can be reaped. 
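# --- Illustrative sketch (not part of the generated change): loop-aware close ---
# The rewritten branch in the hunk below drops the ThreadPoolExecutor dance:
# when a loop is already running it schedules aclose() onto it with
# run_coroutine_threadsafe instead of blocking, and only falls back to
# asyncio.run() when no loop exists. The same pattern in isolation:
import asyncio
import httpx

def close_async_client(client: httpx.AsyncClient) -> None:
    """Best-effort close that works inside and outside a running event loop."""
    try:
        loop = asyncio.get_running_loop()
        # A loop is running in this thread: schedule the close on it.
        asyncio.run_coroutine_threadsafe(client.aclose(), loop)
    except RuntimeError:
        # No running loop: drive the coroutine to completion ourselves.
        try:
            asyncio.run(client.aclose())
        except RuntimeError:
            pass  # e.g. interpreter shutdown; best effort only
# --- end sketch ---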
     owner.client = None
     owner.async_client = None
-
     if sync_client is not None and not sync_client_supplied:
         try:
             sync_client.close()
@@ -116,21 +114,12 @@ def close_clients(
             pass
 
     if async_client is not None and not async_client_supplied:
-        is_async = False
         try:
-            asyncio.get_running_loop()
-            is_async = True
+            loop = asyncio.get_running_loop()
+            asyncio.run_coroutine_threadsafe(async_client.aclose(), loop)
         except RuntimeError:
-            pass
-
-        try:
-            # If this function is called in an async loop then start another
-            # loop in a separate thread to close the async http client.
-            if is_async:
-                with ThreadPoolExecutor(max_workers=1) as executor:
-                    future = executor.submit(asyncio.run, async_client.aclose())
-                    future.result()
-            else:
+            try:
                 asyncio.run(async_client.aclose())
-        except Exception:
-            pass
+            except RuntimeError:
+                # best effort
+                pass
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py
index 752e70e6..fe85b133 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py
@@ -1,122 +1,154 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
-from .assistantmessage import (
-    AssistantMessage,
-    AssistantMessageContent,
-    AssistantMessageContentTypedDict,
-    AssistantMessageRole,
-    AssistantMessageTypedDict,
-)
-from .chatcompletionchoice import (
-    ChatCompletionChoice,
-    ChatCompletionChoiceFinishReason,
-    ChatCompletionChoiceTypedDict,
-)
-from .chatcompletionrequest import (
-    ChatCompletionRequest,
-    ChatCompletionRequestMessages,
-    ChatCompletionRequestMessagesTypedDict,
-    ChatCompletionRequestStop,
-    ChatCompletionRequestStopTypedDict,
-    ChatCompletionRequestToolChoice,
-    ChatCompletionRequestToolChoiceTypedDict,
-    ChatCompletionRequestTypedDict,
-)
-from .chatcompletionresponse import (
-    ChatCompletionResponse,
-    ChatCompletionResponseTypedDict,
-)
-from .chatcompletionstreamrequest import (
-    ChatCompletionStreamRequest,
-    ChatCompletionStreamRequestToolChoice,
-    ChatCompletionStreamRequestToolChoiceTypedDict,
-    ChatCompletionStreamRequestTypedDict,
-    Messages,
-    MessagesTypedDict,
-    Stop,
-    StopTypedDict,
-)
-from .completionchunk import CompletionChunk, CompletionChunkTypedDict
-from .completionevent import CompletionEvent, CompletionEventTypedDict
-from .completionresponsestreamchoice import (
-    CompletionResponseStreamChoice,
-    CompletionResponseStreamChoiceTypedDict,
-    FinishReason,
-)
-from .contentchunk import ContentChunk, ContentChunkTypedDict
-from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict
-from .fimcompletionrequest import (
-    FIMCompletionRequest,
-    FIMCompletionRequestStop,
-    FIMCompletionRequestStopTypedDict,
-    FIMCompletionRequestTypedDict,
-)
-from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict
-from .fimcompletionstreamrequest import (
-    FIMCompletionStreamRequest,
-    FIMCompletionStreamRequestStop,
-    FIMCompletionStreamRequestStopTypedDict,
-    FIMCompletionStreamRequestTypedDict,
-)
-from .function import Function, FunctionTypedDict
-from .functioncall import (
-    Arguments,
-    ArgumentsTypedDict,
-    FunctionCall,
-    FunctionCallTypedDict,
-)
-from .functionname import FunctionName, FunctionNameTypedDict
-from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData
-from .imageurl import ImageURL, ImageURLTypedDict
-from .imageurlchunk import (
-    ImageURLChunk,
-    ImageURLChunkImageURL,
-
ImageURLChunkImageURLTypedDict, - ImageURLChunkType, - ImageURLChunkTypedDict, -) -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .prediction import Prediction, PredictionTypedDict -from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .responseformats import ResponseFormats -from .sdkerror import SDKError -from .security import Security, SecurityTypedDict -from .systemmessage import ( - Role, - SystemMessage, - SystemMessageContent, - SystemMessageContentTypedDict, - SystemMessageTypedDict, -) -from .textchunk import TextChunk, TextChunkTypedDict, Type -from .tool import Tool, ToolTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ( - ToolMessage, - ToolMessageContent, - ToolMessageContentTypedDict, - ToolMessageRole, - ToolMessageTypedDict, -) -from .tooltypes import ToolTypes -from .usageinfo import UsageInfo, UsageInfoTypedDict -from .usermessage import ( - UserMessage, - UserMessageContent, - UserMessageContentTypedDict, - UserMessageRole, - UserMessageTypedDict, -) -from .validationerror import ( - Loc, - LocTypedDict, - ValidationError, - ValidationErrorTypedDict, -) +from .mistralgcperror import MistralGcpError +from typing import TYPE_CHECKING +from importlib import import_module +import builtins +import sys +if TYPE_CHECKING: + from .assistantmessage import ( + AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, + AssistantMessageRole, + AssistantMessageTypedDict, + ) + from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceFinishReason, + ChatCompletionChoiceTypedDict, + ) + from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestMessages, + ChatCompletionRequestMessagesTypedDict, + ChatCompletionRequestStop, + ChatCompletionRequestStopTypedDict, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, + ) + from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, + ) + from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + Messages, + MessagesTypedDict, + Stop, + StopTypedDict, + ) + from .completionchunk import CompletionChunk, CompletionChunkTypedDict + from .completionevent import CompletionEvent, CompletionEventTypedDict + from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, + FinishReason, + ) + from .contentchunk import ContentChunk, ContentChunkTypedDict + from .deltamessage import ( + Content, + ContentTypedDict, + DeltaMessage, + DeltaMessageTypedDict, + ) + from .fimcompletionrequest import ( + FIMCompletionRequest, + FIMCompletionRequestStop, + FIMCompletionRequestStopTypedDict, + FIMCompletionRequestTypedDict, + ) + from .fimcompletionresponse import ( + FIMCompletionResponse, + FIMCompletionResponseTypedDict, + ) + from .fimcompletionstreamrequest import ( + FIMCompletionStreamRequest, + FIMCompletionStreamRequestStop, + FIMCompletionStreamRequestStopTypedDict, + FIMCompletionStreamRequestTypedDict, + ) + from .function import Function, FunctionTypedDict + from .functioncall import ( + Arguments, + 
ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, + ) + from .functionname import FunctionName, FunctionNameTypedDict + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .imageurl import ImageURL, ImageURLTypedDict + from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkImageURL, + ImageURLChunkImageURLTypedDict, + ImageURLChunkType, + ImageURLChunkTypedDict, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .mistralpromptmode import MistralPromptMode + from .no_response_error import NoResponseError + from .prediction import Prediction, PredictionTypedDict + from .referencechunk import ( + ReferenceChunk, + ReferenceChunkType, + ReferenceChunkTypedDict, + ) + from .responseformat import ResponseFormat, ResponseFormatTypedDict + from .responseformats import ResponseFormats + from .responsevalidationerror import ResponseValidationError + from .sdkerror import SDKError + from .security import Security, SecurityTypedDict + from .systemmessage import ( + Role, + SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, + SystemMessageTypedDict, + ) + from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, + ) + from .textchunk import TextChunk, TextChunkTypedDict, Type + from .thinkchunk import ( + ThinkChunk, + ThinkChunkType, + ThinkChunkTypedDict, + Thinking, + ThinkingTypedDict, + ) + from .tool import Tool, ToolTypedDict + from .toolcall import ToolCall, ToolCallTypedDict + from .toolchoice import ToolChoice, ToolChoiceTypedDict + from .toolchoiceenum import ToolChoiceEnum + from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + ToolMessageRole, + ToolMessageTypedDict, + ) + from .tooltypes import ToolTypes + from .usageinfo import UsageInfo, UsageInfoTypedDict + from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageRole, + UserMessageTypedDict, + ) + from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, + ) __all__ = [ "Arguments", @@ -187,6 +219,9 @@ "LocTypedDict", "Messages", "MessagesTypedDict", + "MistralGcpError", + "MistralPromptMode", + "NoResponseError", "Prediction", "PredictionTypedDict", "ReferenceChunk", @@ -195,6 +230,7 @@ "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", + "ResponseValidationError", "Role", "SDKError", "Security", @@ -203,10 +239,17 @@ "StopTypedDict", "SystemMessage", "SystemMessageContent", + "SystemMessageContentChunks", + "SystemMessageContentChunksTypedDict", "SystemMessageContentTypedDict", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", + "ThinkChunk", + "ThinkChunkType", + "ThinkChunkTypedDict", + "Thinking", + "ThinkingTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", @@ -231,3 +274,165 @@ "ValidationError", "ValidationErrorTypedDict", ] + +_dynamic_imports: dict[str, str] = { + "AssistantMessage": ".assistantmessage", + "AssistantMessageContent": ".assistantmessage", + "AssistantMessageContentTypedDict": ".assistantmessage", + "AssistantMessageRole": ".assistantmessage", + "AssistantMessageTypedDict": ".assistantmessage", + "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", + "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", + "ChatCompletionRequest": ".chatcompletionrequest", + "ChatCompletionRequestMessages": ".chatcompletionrequest", + 
"ChatCompletionRequestMessagesTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestStop": ".chatcompletionrequest", + "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestToolChoice": ".chatcompletionrequest", + "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestTypedDict": ".chatcompletionrequest", + "ChatCompletionResponse": ".chatcompletionresponse", + "ChatCompletionResponseTypedDict": ".chatcompletionresponse", + "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", + "Messages": ".chatcompletionstreamrequest", + "MessagesTypedDict": ".chatcompletionstreamrequest", + "Stop": ".chatcompletionstreamrequest", + "StopTypedDict": ".chatcompletionstreamrequest", + "CompletionChunk": ".completionchunk", + "CompletionChunkTypedDict": ".completionchunk", + "CompletionEvent": ".completionevent", + "CompletionEventTypedDict": ".completionevent", + "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", + "FinishReason": ".completionresponsestreamchoice", + "ContentChunk": ".contentchunk", + "ContentChunkTypedDict": ".contentchunk", + "Content": ".deltamessage", + "ContentTypedDict": ".deltamessage", + "DeltaMessage": ".deltamessage", + "DeltaMessageTypedDict": ".deltamessage", + "FIMCompletionRequest": ".fimcompletionrequest", + "FIMCompletionRequestStop": ".fimcompletionrequest", + "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", + "FIMCompletionRequestTypedDict": ".fimcompletionrequest", + "FIMCompletionResponse": ".fimcompletionresponse", + "FIMCompletionResponseTypedDict": ".fimcompletionresponse", + "FIMCompletionStreamRequest": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStop": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest", + "Function": ".function", + "FunctionTypedDict": ".function", + "Arguments": ".functioncall", + "ArgumentsTypedDict": ".functioncall", + "FunctionCall": ".functioncall", + "FunctionCallTypedDict": ".functioncall", + "FunctionName": ".functionname", + "FunctionNameTypedDict": ".functionname", + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "ImageURL": ".imageurl", + "ImageURLTypedDict": ".imageurl", + "ImageURLChunk": ".imageurlchunk", + "ImageURLChunkImageURL": ".imageurlchunk", + "ImageURLChunkImageURLTypedDict": ".imageurlchunk", + "ImageURLChunkType": ".imageurlchunk", + "ImageURLChunkTypedDict": ".imageurlchunk", + "JSONSchema": ".jsonschema", + "JSONSchemaTypedDict": ".jsonschema", + "MistralPromptMode": ".mistralpromptmode", + "NoResponseError": ".no_response_error", + "Prediction": ".prediction", + "PredictionTypedDict": ".prediction", + "ReferenceChunk": ".referencechunk", + "ReferenceChunkType": ".referencechunk", + "ReferenceChunkTypedDict": ".referencechunk", + "ResponseFormat": ".responseformat", + "ResponseFormatTypedDict": ".responseformat", + "ResponseFormats": ".responseformats", + "ResponseValidationError": ".responsevalidationerror", + "SDKError": ".sdkerror", + "Security": ".security", + "SecurityTypedDict": ".security", + 
"Role": ".systemmessage", + "SystemMessage": ".systemmessage", + "SystemMessageContent": ".systemmessage", + "SystemMessageContentTypedDict": ".systemmessage", + "SystemMessageTypedDict": ".systemmessage", + "SystemMessageContentChunks": ".systemmessagecontentchunks", + "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", + "TextChunk": ".textchunk", + "TextChunkTypedDict": ".textchunk", + "Type": ".textchunk", + "ThinkChunk": ".thinkchunk", + "ThinkChunkType": ".thinkchunk", + "ThinkChunkTypedDict": ".thinkchunk", + "Thinking": ".thinkchunk", + "ThinkingTypedDict": ".thinkchunk", + "Tool": ".tool", + "ToolTypedDict": ".tool", + "ToolCall": ".toolcall", + "ToolCallTypedDict": ".toolcall", + "ToolChoice": ".toolchoice", + "ToolChoiceTypedDict": ".toolchoice", + "ToolChoiceEnum": ".toolchoiceenum", + "ToolMessage": ".toolmessage", + "ToolMessageContent": ".toolmessage", + "ToolMessageContentTypedDict": ".toolmessage", + "ToolMessageRole": ".toolmessage", + "ToolMessageTypedDict": ".toolmessage", + "ToolTypes": ".tooltypes", + "UsageInfo": ".usageinfo", + "UsageInfoTypedDict": ".usageinfo", + "UserMessage": ".usermessage", + "UserMessageContent": ".usermessage", + "UserMessageContentTypedDict": ".usermessage", + "UserMessageRole": ".usermessage", + "UserMessageTypedDict": ".usermessage", + "Loc": ".validationerror", + "LocTypedDict": ".validationerror", + "ValidationError": ".validationerror", + "ValidationErrorTypedDict": ".validationerror", +} + + +def dynamic_import(modname, retries=3): + for attempt in range(retries): + try: + return import_module(modname, __package__) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " + ) + + try: + module = dynamic_import(module_name) + result = getattr(module, attr_name) + return result + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = builtins.list(_dynamic_imports.keys()) + return builtins.sorted(lazy_attrs) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py index 9147f566..17d740b6 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -25,7 +25,7 @@ ) -AssistantMessageRole = Literal["assistant"] +AssistantMessageRole = Literal["assistant",] class AssistantMessageTypedDict(TypedDict): @@ -56,7 +56,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py index 9bcf1240..06c955cc 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py
@@ -10,7 +10,14 @@
 
 ChatCompletionChoiceFinishReason = Union[
-    Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr
+    Literal[
+        "stop",
+        "length",
+        "model_length",
+        "error",
+        "tool_calls",
+    ],
+    UnrecognizedStr,
 ]
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py
index a0125c35..d693e3c3 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py
@@ -2,6 +2,7 @@
 from __future__ import annotations
 
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
+from .mistralpromptmode import MistralPromptMode
 from .prediction import Prediction, PredictionTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .systemmessage import SystemMessage, SystemMessageTypedDict
@@ -17,8 +18,9 @@
     UNSET,
     UNSET_SENTINEL,
 )
-from mistralai_gcp.utils import get_discriminator
+from mistralai_gcp.utils import get_discriminator, validate_open_enum
 from pydantic import Discriminator, Tag, model_serializer
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
@@ -61,11 +63,13 @@
     "ChatCompletionRequestToolChoiceTypedDict",
     Union[ToolChoiceTypedDict, ToolChoiceEnum],
 )
+r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""
 
 ChatCompletionRequestToolChoice = TypeAliasType(
     "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum]
 )
+r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool."""
 
 
 class ChatCompletionRequestTypedDict(TypedDict):
@@ -86,16 +90,23 @@ class ChatCompletionRequestTypedDict(TypedDict):
     random_seed: NotRequired[Nullable[int]]
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
     response_format: NotRequired[ResponseFormatTypedDict]
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
+    r"""A list of tools the model may call.
Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" class ChatCompletionRequest(BaseModel): @@ -124,23 +135,33 @@ class ChatCompletionRequest(BaseModel): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: Optional[ChatCompletionRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. 
`auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" @model_serializer(mode="wrap") def serialize_model(self, handler): @@ -159,15 +180,23 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", + ] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + "prompt_mode", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py index 0404a9d2..a7953eb1 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_gcp.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict +from typing import List +from typing_extensions import TypedDict class ChatCompletionResponseTypedDict(TypedDict): @@ -13,8 +13,8 @@ class ChatCompletionResponseTypedDict(TypedDict): object: str model: str usage: UsageInfoTypedDict - created: NotRequired[int] - choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + created: int + choices: List[ChatCompletionChoiceTypedDict] class ChatCompletionResponse(BaseModel): @@ -26,6 +26,6 @@ class ChatCompletionResponse(BaseModel): usage: UsageInfo - created: Optional[int] = None + created: int - choices: Optional[List[ChatCompletionChoice]] = None + choices: List[ChatCompletionChoice] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index 656f1d58..c2d25128 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -17,8 +18,9 @@ UNSET, UNSET_SENTINEL, ) -from mistralai_gcp.utils import get_discriminator +from mistralai_gcp.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -57,11 +59,13 @@ "ChatCompletionStreamRequestToolChoiceTypedDict", Union[ToolChoiceTypedDict, ToolChoiceEnum], ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
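# --- Illustrative sketch (not part of the generated change): open enums ---
# Fields like finish_reason (a Literal union with UnrecognizedStr) and
# prompt_mode (PlainValidator(validate_open_enum(False))) stay tolerant of
# values the SDK does not know yet: they arrive as plain strings instead of
# failing validation. Reuses `client` and `msgs` from the earlier sketches.
res = client.chat.complete(model="mistral-large-2411", messages=msgs)
reason = res.choices[0].finish_reason
if reason == "tool_calls":
    ...  # dispatch the requested tool calls
else:
    # An unrecognized server value still lands here as a string.
    print("finished:", reason)
# --- end sketch ---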
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" ChatCompletionStreamRequestToolChoice = TypeAliasType( "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" class ChatCompletionStreamRequestTypedDict(TypedDict): @@ -81,16 +85,23 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" class ChatCompletionStreamRequest(BaseModel): @@ -118,23 +129,33 @@ class ChatCompletionStreamRequest(BaseModel): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" @model_serializer(mode="wrap") def serialize_model(self, handler): @@ -153,15 +174,23 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", + ] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + "prompt_mode", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py index 8d779971..c90a68c9 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py @@ -10,7 +10,15 @@ from typing_extensions import Annotated, TypedDict -FinishReason = Union[Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr] +FinishReason = Union[ + Literal[ + "stop", + "length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] class CompletionResponseStreamChoiceTypedDict(TypedDict): @@ -38,7 +46,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py index f9f0868b..1801ac76 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py @@ -46,7 +46,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py index 6dfb7373..12af226c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -27,10 +27,7 @@ class FIMCompletionRequestTypedDict(TypedDict): model: str - r"""ID of the model to use. 
Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" temperature: NotRequired[Nullable[float]] @@ -53,10 +50,7 @@ class FIMCompletionRequestTypedDict(TypedDict): class FIMCompletionRequest(BaseModel): model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" @@ -110,7 +104,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py index a4d273a2..e1940b0a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_gcp.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict +from typing import List +from typing_extensions import TypedDict class FIMCompletionResponseTypedDict(TypedDict): @@ -13,8 +13,8 @@ class FIMCompletionResponseTypedDict(TypedDict): object: str model: str usage: UsageInfoTypedDict - created: NotRequired[int] - choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + created: int + choices: List[ChatCompletionChoiceTypedDict] class FIMCompletionResponse(BaseModel): @@ -26,6 +26,6 @@ class FIMCompletionResponse(BaseModel): usage: UsageInfo - created: Optional[int] = None + created: int - choices: Optional[List[ChatCompletionChoice]] = None + choices: List[ChatCompletionChoice] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py index 406749bb..ba7a66d2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -27,10 +27,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" temperature: NotRequired[Nullable[float]] @@ -52,10 +49,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): class FIMCompletionStreamRequest(BaseModel): model: str - r"""ID of the model to use. 
Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" @@ -108,7 +102,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py index 11024f85..79609351 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py @@ -2,7 +2,9 @@ from __future__ import annotations from .validationerror import ValidationError -from mistralai_gcp import utils +from dataclasses import dataclass, field +import httpx +from mistralai_gcp.models import MistralGcpError from mistralai_gcp.types import BaseModel from typing import List, Optional @@ -11,11 +13,16 @@ class HTTPValidationErrorData(BaseModel): detail: Optional[List[ValidationError]] = None -class HTTPValidationError(Exception): - data: HTTPValidationErrorData +@dataclass(unsafe_hash=True) +class HTTPValidationError(MistralGcpError): + data: HTTPValidationErrorData = field(hash=False) - def __init__(self, data: HTTPValidationErrorData): - self.data = data - - def __str__(self) -> str: - return utils.marshal_json(self.data, HTTPValidationErrorData) + def __init__( + self, + data: HTTPValidationErrorData, + raw_response: httpx.Response, + body: Optional[str] = None, + ): + message = body or raw_response.text + super().__init__(message, raw_response, body) + object.__setattr__(self, "data", data) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py index e7aa11f0..20d4ba77 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py @@ -32,7 +32,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py index 1fc0b808..ddb53f21 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py @@ -15,7 +15,7 @@ ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) -ImageURLChunkType = Literal["image_url"] +ImageURLChunkType = Literal["image_url",] class ImageURLChunkTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py index 2529ce31..26914b2f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py @@ -40,7 +40,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py b/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py index 742aac0b..36c87ab0 100644 --- 
a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py @@ -10,11 +10,15 @@ class PredictionTypedDict(TypedDict): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + type: Literal["content"] content: NotRequired[str] class Prediction(BaseModel): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + TYPE: Annotated[ Annotated[ Optional[Literal["content"]], AfterValidator(validate_const("content")) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py index c4fa3b8b..904e8b82 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -ReferenceChunkType = Literal["reference"] +ReferenceChunkType = Literal["reference",] class ReferenceChunkTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py index 5a24f644..9fe5116c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py @@ -16,14 +16,16 @@ class ResponseFormatTypedDict(TypedDict): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + type: NotRequired[ResponseFormats] - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + type: Optional[ResponseFormats] = None - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" json_schema: OptionalNullable[JSONSchema] = UNSET @@ -37,7 +39,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py index 08c39951..cbf83ce7 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py @@ -4,5 +4,8 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object", "json_schema"] -r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" +ResponseFormats = Literal[ + "text", + "json_object", + "json_schema", +] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py index 03216cbf..7f53bbcd 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py @@ -1,22 +1,40 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from dataclasses import dataclass -from typing import Optional import httpx +from typing import Optional +from dataclasses import dataclass + +from mistralai_gcp.models import MistralGcpError + +MAX_MESSAGE_LEN = 10_000 + + +@dataclass(unsafe_hash=True) +class SDKError(MistralGcpError): + """The fallback error class if no more specific error class is matched.""" + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + body_display = body or raw_response.text or '""' + if message: + message += ": " + message += f"Status {raw_response.status_code}" -@dataclass -class SDKError(Exception): - """Represents an error returned by the API.""" + headers = raw_response.headers + content_type = headers.get("content-type", '""') + if content_type != "application/json": + if " " in content_type: + content_type = f'"{content_type}"' + message += f" Content-Type {content_type}" - message: str - status_code: int = -1 - body: str = "" - raw_response: Optional[httpx.Response] = None + if len(body_display) > MAX_MESSAGE_LEN: + truncated = body_display[:MAX_MESSAGE_LEN] + remaining = len(body_display) - MAX_MESSAGE_LEN + body_display = f"{truncated}...and {remaining} more chars" - def __str__(self): - body = "" - if len(self.body) > 0: - body = f"\n{self.body}" + message += f". Body: {body_display}" + message = message.strip() - return f"{self.message}: Status {self.status_code}{body}" + super().__init__(message, raw_response, body) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py index f14acf12..d74bdf32 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py @@ -1,23 +1,27 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .textchunk import TextChunk, TextChunkTypedDict +from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, +) from mistralai_gcp.types import BaseModel from typing import List, Literal, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict SystemMessageContentTypedDict = TypeAliasType( - "SystemMessageContentTypedDict", Union[str, List[TextChunkTypedDict]] + "SystemMessageContentTypedDict", + Union[str, List[SystemMessageContentChunksTypedDict]], ) SystemMessageContent = TypeAliasType( - "SystemMessageContent", Union[str, List[TextChunk]] + "SystemMessageContent", Union[str, List[SystemMessageContentChunks]] ) -Role = Literal["system"] +Role = Literal["system",] class SystemMessageTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py index 12f666cd..c4a8cf28 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -Type = Literal["text"] +Type = Literal["text",] class TextChunkTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py index 8e6a6ad8..01f6f677 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py @@ -4,4 +4,9 @@ from typing import Literal -ToolChoiceEnum = Literal["auto", "none", "any", "required"] +ToolChoiceEnum = Literal[ + "auto", + "none", + "any", + "required", +] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py index 886b6ff1..d6aa2621 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py @@ -22,7 +22,7 @@ ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) -ToolMessageRole = Literal["tool"] +ToolMessageRole = Literal["tool",] class ToolMessageTypedDict(TypedDict): @@ -51,7 +51,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py b/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py index 878444c6..8b812ae0 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py @@ -5,4 +5,4 @@ from typing import Literal, Union -ToolTypes = Union[Literal["function"], UnrecognizedStr] +ToolTypes = Union[Literal["function",], UnrecognizedStr] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py index 9de6af7e..59f36158 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py @@ -1,19 +1,82 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel -from typing_extensions import TypedDict +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict class UsageInfoTypedDict(TypedDict): - prompt_tokens: int - completion_tokens: int - total_tokens: int + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + prompt_audio_seconds: NotRequired[Nullable[int]] class UsageInfo(BaseModel): - prompt_tokens: int + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - completion_tokens: int + prompt_tokens: Optional[int] = 0 - total_tokens: int + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + prompt_audio_seconds: OptionalNullable[int] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + nullable_fields = ["prompt_audio_seconds"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py index 287bb1b4..0168b452 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py @@ -16,7 +16,7 @@ UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) -UserMessageRole = Literal["user"] +UserMessageRole = Literal["user",] class UserMessageTypedDict(TypedDict): @@ -39,7 +39,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py index dd93cc7f..4e9fcad9 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py @@ -9,6 +9,8 @@ import google.auth.transport import google.auth.transport.requests import httpx +import importlib +import sys from mistralai_gcp import models from mistralai_gcp._hooks import BeforeRequestHook, SDKHooks @@ -28,14 +30,12 @@ "mistral-nemo-2407": "mistral-nemo@2407", } - def get_model_info(model: str) -> tuple[str, str]: # if the model requires the legacy
format, use it, else do nothing. if model in LEGACY_MODEL_ID_FORMAT: return "-".join(model.split("-")[:-1]), LEGACY_MODEL_ID_FORMAT[model] return model, model - class MistralGoogleCloud(BaseSDK): r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" @@ -43,6 +43,10 @@ class MistralGoogleCloud(BaseSDK): r"""Chat Completion API.""" fim: Fim r"""Fill-in-the-middle API.""" + _sub_sdk_map = { + "chat": ("mistralai_gcp.chat", "Chat"), + "fim": ("mistralai_gcp.fim", "Fim"), + } def __init__( self, @@ -66,36 +70,37 @@ def __init__( :param retry_config: The retry configuration to use for all supported methods :param timeout_ms: Optional request timeout applied to each operation in milliseconds """ - + credentials = None if not access_token: credentials, loaded_project_id = google.auth.default( scopes=["https://www.googleapis.com/auth/cloud-platform"], ) - credentials.refresh(google.auth.transport.requests.Request()) - if not isinstance(credentials, google.auth.credentials.Credentials): - raise models.SDKError( - "credentials must be an instance of google.auth.credentials.Credentials" - ) + # default will already raise a google.auth.exceptions.DefaultCredentialsError if no credentials are found + assert isinstance( + credentials, google.auth.credentials.Credentials ), "credentials must be an instance of google.auth.credentials.Credentials" + credentials.refresh(google.auth.transport.requests.Request()) project_id = project_id or loaded_project_id if project_id is None: - raise models.SDKError("project_id must be provided") + raise ValueError("project_id must be provided") def auth_token() -> str: if access_token: return access_token + assert credentials is not None, "credentials must be initialized" credentials.refresh(google.auth.transport.requests.Request()) token = credentials.token if not token: - raise models.SDKError("Failed to get token from credentials") + raise Exception("Failed to get token from credentials") return token client_supplied = True if client is None: - client = httpx.Client() + client = httpx.Client(follow_redirects=True) client_supplied = False assert issubclass( @@ -104,7 +109,7 @@ def auth_token() -> str: async_client_supplied = True if async_client is None: - async_client = httpx.AsyncClient() + async_client = httpx.AsyncClient(follow_redirects=True) async_client_supplied = False if debug_logger is None: @@ -114,13 +119,7 @@ def auth_token() -> str: type(async_client), AsyncHttpClient ), "The provided async_client must implement the AsyncHttpClient protocol."
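# A minimal usage sketch, assuming the reworked constructor and the lazily
# imported `chat` sub-SDK (see `_sub_sdk_map` and `__getattr__` further down)
# behave as shown in this file. The region, project id, and model name are
# hypothetical placeholders, and `chat.complete` is assumed to mirror the
# upstream mistralai client's signature.
#
#   from mistralai_gcp import MistralGoogleCloud
#
#   # Application Default Credentials are resolved via google.auth.default()
#   # when no access_token is supplied, as in the branch above.
#   sdk = MistralGoogleCloud(region="europe-west4", project_id="my-project")
#   res = sdk.chat.complete(
#       model="mistral-small-latest",  # hypothetical model id
#       messages=[{"role": "user", "content": "Hello!"}],
#   )
#   print(res.choices[0].message.content)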
- security: Any = None - if callable(auth_token): - security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment - api_key=auth_token() - ) - else: - security = models.Security(api_key=auth_token) + security = lambda: models.Security(api_key=auth_token()) BaseSDK.__init__( self, @@ -139,8 +138,13 @@ def auth_token() -> str: ) hooks = SDKHooks() + + # pylint: disable=protected-access + self.sdk_configuration.__dict__["_hooks"] = hooks + hook = GoogleCloudBeforeRequestHook(region, project_id) hooks.register_before_request_hook(hook) + current_server_url, *_ = self.sdk_configuration.get_server_details() server_url, self.sdk_configuration.client = hooks.sdk_init( current_server_url, client @@ -148,9 +152,6 @@ def auth_token() -> str: if current_server_url != server_url: self.sdk_configuration.server_url = server_url - # pylint: disable=protected-access - self.sdk_configuration.__dict__["_hooks"] = hooks - weakref.finalize( self, close_clients, @@ -161,11 +162,43 @@ def auth_token() -> str: self.sdk_configuration.async_client_supplied, ) - self._init_sdks() + def dynamic_import(self, modname, retries=3): + for attempt in range(retries): + try: + return importlib.import_module(modname) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + def __getattr__(self, name: str): + if name in self._sub_sdk_map: + module_path, class_name = self._sub_sdk_map[name] + try: + module = self.dynamic_import(module_path) + klass = getattr(module, class_name) + instance = klass(self.sdk_configuration, parent_ref=self) + setattr(self, name, instance) + return instance + except ImportError as e: + raise AttributeError( + f"Failed to import module {module_path} for attribute {name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to find class {class_name} in module {module_path} for attribute {name}: {e}" + ) from e + + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{name}'" + ) - def _init_sdks(self): - self.chat = Chat(self.sdk_configuration) - self.fim = Fim(self.sdk_configuration) + def __dir__(self): + default_attrs = list(super().__dir__()) + lazy_attrs = list(self._sub_sdk_map.keys()) + return sorted(list(set(default_attrs + lazy_attrs))) def __enter__(self): return self @@ -189,7 +222,6 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): await self.sdk_configuration.async_client.aclose() self.sdk_configuration.async_client = None - class GoogleCloudBeforeRequestHook(BeforeRequestHook): def __init__(self, region: str, project_id: str): self.region = region @@ -210,7 +242,7 @@ def before_request( new_content = json.dumps(parsed).encode("utf-8") if model_id == "": - raise models.SDKError("model must be provided") + raise ValueError("model must be provided") stream = "streamRawPredict" in request.url.path specifier = "streamRawPredict" if stream else "rawPredict" @@ -228,4 +260,4 @@ def before_request( stream=None, ) - return next_request + return next_request \ No newline at end of file diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index c373d27d..cf85c47e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -1,6 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from ._hooks import SDKHooks from ._version import ( __gen_version__, __openapi_doc_version__, @@ -42,9 +41,6 @@ class SDKConfiguration: retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None - def __post_init__(self): - self._hooks = SDKHooks() - def get_server_details(self) -> Tuple[str, Dict[str, str]]: if self.server_url is not None and self.server_url: return remove_suffix(self.server_url, "/"), {} @@ -55,6 +51,3 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: raise ValueError(f'Invalid server "{self.server}"') return SERVERS[self.server], {} - - def get_hooks(self) -> SDKHooks: - return self._hooks diff --git a/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py b/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py index a6187efa..231c2e37 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py @@ -2,7 +2,7 @@ from pydantic import ConfigDict, model_serializer from pydantic import BaseModel as PydanticBaseModel -from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType +from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union from typing_extensions import TypeAliasType, TypeAlias @@ -35,5 +35,5 @@ def __bool__(self) -> Literal[False]: "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) ) -UnrecognizedInt = NewType("UnrecognizedInt", int) -UnrecognizedStr = NewType("UnrecognizedStr", str) +UnrecognizedInt: TypeAlias = int +UnrecognizedStr: TypeAlias = str diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py index 3cded8fe..56164cf3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py @@ -1,50 +1,57 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from .annotations import get_discriminator -from .enums import OpenEnumMeta -from .headers import get_headers, get_response_headers -from .metadata import ( - FieldMetadata, - find_metadata, - FormMetadata, - HeaderMetadata, - MultipartFormMetadata, - PathParamMetadata, - QueryParamMetadata, - RequestMetadata, - SecurityMetadata, -) -from .queryparams import get_query_params -from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig -from .requestbodies import serialize_request_body, SerializedRequestBody -from .security import get_security -from .serializers import ( - get_pydantic_model, - marshal_json, - unmarshal, - unmarshal_json, - serialize_decimal, - serialize_float, - serialize_int, - stream_to_text, - stream_to_text_async, - stream_to_bytes, - stream_to_bytes_async, - validate_const, - validate_decimal, - validate_float, - validate_int, - validate_open_enum, -) -from .url import generate_url, template_url, remove_suffix -from .values import ( - get_global_from_env, - match_content_type, - match_status_codes, - match_response, - cast_partial, -) -from .logger import Logger, get_body_content, get_default_logger +from typing import TYPE_CHECKING +from importlib import import_module +import builtins +import sys + +if TYPE_CHECKING: + from .annotations import get_discriminator + from .datetimes import parse_datetime + from .enums import OpenEnumMeta + from .headers import get_headers, get_response_headers + from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, + ) + from .queryparams import get_query_params + from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig + from .requestbodies import serialize_request_body, SerializedRequestBody + from .security import get_security + from .serializers import ( + get_pydantic_model, + marshal_json, + unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, + validate_decimal, + validate_float, + validate_int, + validate_open_enum, + ) + from .url import generate_url, template_url, remove_suffix + from .values import ( + get_global_from_env, + match_content_type, + match_status_codes, + match_response, + cast_partial, + ) + from .logger import Logger, get_body_content, get_default_logger __all__ = [ "BackoffStrategy", @@ -55,6 +62,7 @@ "get_body_content", "get_default_logger", "get_discriminator", + "parse_datetime", "get_global_from_env", "get_headers", "get_pydantic_model", @@ -97,3 +105,93 @@ "validate_open_enum", "cast_partial", ] + +_dynamic_imports: dict[str, str] = { + "BackoffStrategy": ".retries", + "FieldMetadata": ".metadata", + "find_metadata": ".metadata", + "FormMetadata": ".metadata", + "generate_url": ".url", + "get_body_content": ".logger", + "get_default_logger": ".logger", + "get_discriminator": ".annotations", + "parse_datetime": ".datetimes", + "get_global_from_env": ".values", + "get_headers": ".headers", + "get_pydantic_model": ".serializers", + "get_query_params": ".queryparams", + "get_response_headers": ".headers", + "get_security": ".security", + "HeaderMetadata": ".metadata", + "Logger": ".logger", + "marshal_json": ".serializers", + "match_content_type": ".values", + "match_status_codes": ".values", + "match_response": ".values", + "MultipartFormMetadata": ".metadata", + 
"OpenEnumMeta": ".enums", + "PathParamMetadata": ".metadata", + "QueryParamMetadata": ".metadata", + "remove_suffix": ".url", + "Retries": ".retries", + "retry": ".retries", + "retry_async": ".retries", + "RetryConfig": ".retries", + "RequestMetadata": ".metadata", + "SecurityMetadata": ".metadata", + "serialize_decimal": ".serializers", + "serialize_float": ".serializers", + "serialize_int": ".serializers", + "serialize_request_body": ".requestbodies", + "SerializedRequestBody": ".requestbodies", + "stream_to_text": ".serializers", + "stream_to_text_async": ".serializers", + "stream_to_bytes": ".serializers", + "stream_to_bytes_async": ".serializers", + "template_url": ".url", + "unmarshal": ".serializers", + "unmarshal_json": ".serializers", + "validate_decimal": ".serializers", + "validate_const": ".serializers", + "validate_float": ".serializers", + "validate_int": ".serializers", + "validate_open_enum": ".serializers", + "cast_partial": ".values", +} + + +def dynamic_import(modname, retries=3): + for attempt in range(retries): + try: + return import_module(modname, __package__) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " + ) + + try: + module = dynamic_import(module_name) + return getattr(module, attr_name) + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = builtins.list(_dynamic_imports.keys()) + return builtins.sorted(lazy_attrs) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py index 387874ed..12e0aa4f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py @@ -3,6 +3,7 @@ from enum import Enum from typing import Any, Optional + def get_discriminator(model: Any, fieldname: str, key: str) -> str: """ Recursively search for the discriminator attribute in a model. 
@@ -25,31 +26,54 @@ def get_field_discriminator(field: Any) -> Optional[str]: if isinstance(field, dict): if key in field: - return f'{field[key]}' + return f"{field[key]}" if hasattr(field, fieldname): attr = getattr(field, fieldname) if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' + return f"{attr.value}" + return f"{attr}" if hasattr(field, upper_fieldname): attr = getattr(field, upper_fieldname) if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' + return f"{attr.value}" + return f"{attr}" return None + def search_nested_discriminator(obj: Any) -> Optional[str]: + """Recursively search for discriminator in nested structures.""" + # First try direct field lookup + discriminator = get_field_discriminator(obj) + if discriminator is not None: + return discriminator + + # If it's a dict, search in nested values + if isinstance(obj, dict): + for value in obj.values(): + if isinstance(value, list): + # Search in list items + for item in value: + nested_discriminator = search_nested_discriminator(item) + if nested_discriminator is not None: + return nested_discriminator + elif isinstance(value, dict): + # Search in nested dict + nested_discriminator = search_nested_discriminator(value) + if nested_discriminator is not None: + return nested_discriminator + + return None if isinstance(model, list): for field in model: - discriminator = get_field_discriminator(field) + discriminator = search_nested_discriminator(field) if discriminator is not None: return discriminator - discriminator = get_field_discriminator(model) + discriminator = search_nested_discriminator(model) if discriminator is not None: return discriminator - raise ValueError(f'Could not find discriminator field {fieldname} in {model}') + raise ValueError(f"Could not find discriminator field {fieldname} in {model}") diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py index c650b10c..c3bc13cf 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py @@ -1,34 +1,74 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" import enum - +import sys class OpenEnumMeta(enum.EnumMeta): - def __call__( - cls, value, names=None, *, module=None, qualname=None, type=None, start=1 - ): - # The `type` kwarg also happens to be a built-in that pylint flags as - # redeclared. Safe to ignore this lint rule with this scope. - # pylint: disable=redefined-builtin + # The __call__ method `boundary` kwarg was added in 3.11 and must be present + # for pyright. Refer also: https://github.com/pylint-dev/pylint/issues/9622 + # pylint: disable=unexpected-keyword-arg + # The __call__ method `values` varg must be named for pyright. + # pylint: disable=keyword-arg-before-vararg + + if sys.version_info >= (3, 11): + def __call__( + cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. 
+ # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + except ValueError: + return value + else: + def __call__( + cls, value, names=None, *, module=None, qualname=None, type=None, start=1 + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin - if names is not None: - return super().__call__( - value, - names=names, - module=module, - qualname=qualname, - type=type, - start=start, - ) + if names is not None: + return super().__call__( + value, + names=names, + module=module, + qualname=qualname, + type=type, + start=start, + ) - try: - return super().__call__( - value, - names=names, # pyright: ignore[reportArgumentType] - module=module, - qualname=qualname, - type=type, - start=start, - ) - except ValueError: - return value + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + module=module, + qualname=qualname, + type=type, + start=start, + ) + except ValueError: + return value diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py index 74a63f75..0969899b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py @@ -17,6 +17,9 @@ class EventStream(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. + client_ref: Optional[object] response: httpx.Response generator: Generator[T, None, None] @@ -25,9 +28,11 @@ def __init__( response: httpx.Response, decoder: Callable[[str], T], sentinel: Optional[str] = None, + client_ref: Optional[object] = None, ): self.response = response self.generator = stream_events(response, decoder, sentinel) + self.client_ref = client_ref def __iter__(self): return self @@ -43,6 +48,9 @@ def __exit__(self, exc_type, exc_val, exc_tb): class EventStreamAsync(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. 
+ client_ref: Optional[object] response: httpx.Response generator: AsyncGenerator[T, None] @@ -51,9 +59,11 @@ def __init__( response: httpx.Response, decoder: Callable[[str], T], sentinel: Optional[str] = None, + client_ref: Optional[object] = None, ): self.response = response self.generator = stream_events_async(response, decoder, sentinel) + self.client_ref = client_ref def __aiter__(self): return self diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py index 0472aba8..f961e76b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py @@ -86,11 +86,39 @@ def _populate_form( return form +def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]: + """Extract file name, content, and content type from a file object.""" + file_fields: Dict[str, FieldInfo] = file_obj.__class__.model_fields + + file_name = "" + content = None + content_type = None + + for file_field_name in file_fields: + file_field = file_fields[file_field_name] + + file_metadata = find_field_metadata(file_field, MultipartFormMetadata) + if file_metadata is None: + continue + + if file_metadata.content: + content = getattr(file_obj, file_field_name, None) + elif file_field_name == "content_type": + content_type = getattr(file_obj, file_field_name, None) + else: + file_name = getattr(file_obj, file_field_name) + + if file_name == "" or content is None: + raise ValueError("invalid multipart/form-data file") + + return file_name, content, content_type + + def serialize_multipart_form( media_type: str, request: Any -) -> Tuple[str, Dict[str, Any], Dict[str, Any]]: +) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]: form: Dict[str, Any] = {} - files: Dict[str, Any] = {} + files: List[Tuple[str, Any]] = [] if not isinstance(request, BaseModel): raise TypeError("invalid request body type") @@ -112,38 +140,41 @@ def serialize_multipart_form( f_name = field.alias if field.alias else name if field_metadata.file: - file_fields: Dict[str, FieldInfo] = val.__class__.model_fields - - file_name = "" - content = None - content_type = None + if isinstance(val, List): + # Handle array of files + array_field_name = f_name + "[]" + for file_obj in val: + if not _is_set(file_obj): + continue - for file_field_name in file_fields: - file_field = file_fields[file_field_name] + file_name, content, content_type = _extract_file_properties( + file_obj + ) - file_metadata = find_field_metadata(file_field, MultipartFormMetadata) - if file_metadata is None: - continue + if content_type is not None: + files.append( + (array_field_name, (file_name, content, content_type)) + ) + else: + files.append((array_field_name, (file_name, content))) + else: + # Handle single file + file_name, content, content_type = _extract_file_properties(val) - if file_metadata.content: - content = getattr(val, file_field_name, None) - elif file_field_name == "content_type": - content_type = getattr(val, file_field_name, None) + if content_type is not None: + files.append((f_name, (file_name, content, content_type))) else: - file_name = getattr(val, file_field_name) - - if file_name == "" or content is None: - raise ValueError("invalid multipart/form-data file") - - if content_type is not None: - files[f_name] = (file_name, content, content_type) - else: - files[f_name] = (file_name, content) + files.append((f_name, (file_name, content))) elif field_metadata.json: - files[f_name] = ( - None, - marshal_json(val, 
request_field_types[name]), - "application/json", + files.append( + ( + f_name, + ( + None, + marshal_json(val, request_field_types[name]), + "application/json", + ), + ) ) else: if isinstance(val, List): @@ -154,7 +185,8 @@ def serialize_multipart_form( continue values.append(_val_to_string(value)) - form[f_name + "[]"] = values + array_field_name = f_name + "[]" + form[array_field_name] = values else: form[f_name] = _val_to_string(val) return media_type, form, files diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py index 37a6e7f9..c04e0db8 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py @@ -27,12 +27,13 @@ def get_query_params( query_params: Any, gbls: Optional[Any] = None, + allow_empty_value: Optional[List[str]] = None, ) -> Dict[str, List[str]]: params: Dict[str, List[str]] = {} - globals_already_populated = _populate_query_params(query_params, gbls, params, []) + globals_already_populated = _populate_query_params(query_params, gbls, params, [], allow_empty_value) if _is_set(gbls): - _populate_query_params(gbls, None, params, globals_already_populated) + _populate_query_params(gbls, None, params, globals_already_populated, allow_empty_value) return params @@ -42,6 +43,7 @@ def _populate_query_params( gbls: Any, query_param_values: Dict[str, List[str]], skip_fields: List[str], + allow_empty_value: Optional[List[str]] = None, ) -> List[str]: globals_already_populated: List[str] = [] @@ -69,6 +71,16 @@ def _populate_query_params( globals_already_populated.append(name) f_name = field.alias if field.alias is not None else name + + allow_empty_set = set(allow_empty_value or []) + should_include_empty = f_name in allow_empty_set and ( + value is None or value == [] or value == "" + ) + + if should_include_empty: + query_param_values[f_name] = [""] + continue + serialization = metadata.serialization if serialization is not None: serialized_parms = _get_serialized_params( diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py index 4d608671..88a91b10 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py @@ -3,7 +3,9 @@ import asyncio import random import time -from typing import List +from datetime import datetime +from email.utils import parsedate_to_datetime +from typing import List, Optional import httpx @@ -51,9 +53,11 @@ def __init__(self, config: RetryConfig, status_codes: List[str]): class TemporaryError(Exception): response: httpx.Response + retry_after: Optional[int] def __init__(self, response: httpx.Response): self.response = response + self.retry_after = _parse_retry_after_header(response) class PermanentError(Exception): @@ -63,6 +67,62 @@ def __init__(self, inner: Exception): self.inner = inner +def _parse_retry_after_header(response: httpx.Response) -> Optional[int]: + """Parse Retry-After header from response. + + Returns: + Retry interval in milliseconds, or None if header is missing or invalid. 
+ """ + retry_after_header = response.headers.get("retry-after") + if not retry_after_header: + return None + + try: + seconds = float(retry_after_header) + return round(seconds * 1000) + except ValueError: + pass + + try: + retry_date = parsedate_to_datetime(retry_after_header) + delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds() + return round(max(0, delta) * 1000) + except (ValueError, TypeError): + pass + + return None + + +def _get_sleep_interval( + exception: Exception, + initial_interval: int, + max_interval: int, + exponent: float, + retries: int, +) -> float: + """Get sleep interval for retry with exponential backoff. + + Args: + exception: The exception that triggered the retry. + initial_interval: Initial retry interval in milliseconds. + max_interval: Maximum retry interval in milliseconds. + exponent: Base for exponential backoff calculation. + retries: Current retry attempt count. + + Returns: + Sleep interval in seconds. + """ + if ( + isinstance(exception, TemporaryError) + and exception.retry_after is not None + and exception.retry_after > 0 + ): + return exception.retry_after / 1000 + + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + return min(sleep, max_interval / 1000) + + def retry(func, retries: Retries): if retries.config.strategy == "backoff": @@ -183,8 +243,10 @@ def retry_with_backoff( return exception.response raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) time.sleep(sleep) retries += 1 @@ -211,7 +273,9 @@ async def retry_with_backoff_async( return exception.response raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) await asyncio.sleep(sleep) retries += 1 diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py index baa41fbd..378a14c0 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py @@ -1,13 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from decimal import Decimal +import functools import json -from typing import Any, Dict, List, Union, get_args -import httpx +import typing +from typing import Any, Dict, List, Tuple, Union, get_args +import typing_extensions from typing_extensions import get_origin + +import httpx from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspection.typing_objects import is_union from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset @@ -185,6 +188,15 @@ def is_nullable(field): return False +def is_union(obj: object) -> bool: + """ + Returns True if the given object is a typing.Union or typing_extensions.Union. 
+ """ + return any( + obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union") + ) + + def stream_to_text(stream: httpx.Response) -> str: return "".join(stream.iter_text()) @@ -217,3 +229,21 @@ def _contains_pydantic_model(data: Any) -> bool: return any(_contains_pydantic_model(value) for value in data.values()) return False + + +@functools.cache +def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: + """ + Get typing objects by name from typing and typing_extensions. + Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types + """ + result = tuple( + getattr(module, name) + for module in (typing, typing_extensions) + if hasattr(module, name) + ) + if not result: + raise ValueError( + f"Neither typing nor typing_extensions has an object called {name!r}" + ) + return result diff --git a/packages/mistralai_gcp/uv.lock b/packages/mistralai_gcp/uv.lock index afd17643..b0751527 100644 --- a/packages/mistralai_gcp/uv.lock +++ b/packages/mistralai_gcp/uv.lock @@ -1,10 +1,11 @@ version = 1 revision = 3 -requires-python = ">=3.10" +requires-python = ">=3.9.2" resolution-markers = [ "python_full_version >= '3.12'", "python_full_version == '3.11.*'", "python_full_version == '3.10.*'", + "python_full_version < '3.10'", ] [[package]] @@ -183,15 +184,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/50/3d/9373ad9c56321fdab5b41197068e1d8c25883b3fea29dd361f9b55116869/dill-0.4.0-py3-none-any.whl", hash = "sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049", size = 119668, upload-time = "2025-04-16T00:41:47.671Z" }, ] -[[package]] -name = "eval-type-backport" -version = "0.3.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fb/a3/cafafb4558fd638aadfe4121dc6cefb8d743368c085acb2f521df0f3d9d7/eval_type_backport-0.3.1.tar.gz", hash = "sha256:57e993f7b5b69d271e37482e62f74e76a0276c82490cf8e4f0dffeb6b332d5ed", size = 9445, upload-time = "2025-12-02T11:51:42.987Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/cf/22/fdc2e30d43ff853720042fa15baa3e6122722be1a7950a98233ebb55cd71/eval_type_backport-0.3.1-py3-none-any.whl", hash = "sha256:279ab641905e9f11129f56a8a78f493518515b83402b860f6f06dd7c011fdfa8", size = 6063, upload-time = "2025-12-02T11:51:41.665Z" }, -] - [[package]] name = "exceptiongroup" version = "1.3.1" @@ -269,6 +261,7 @@ name = "iniconfig" version = "2.1.0" source = { registry = "https://pypi.org/simple" } resolution-markers = [ + "python_full_version < '3.10'", ] sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } wheels = [ @@ -309,89 +302,85 @@ wheels = [ [[package]] name = "mistralai-gcp" -version = "1.6.0" +version = "2.0.1" source = { editable = "." 
} dependencies = [ - { name = "eval-type-backport" }, { name = "google-auth" }, + { name = "httpcore" }, { name = "httpx" }, { name = "pydantic" }, - { name = "python-dateutil" }, { name = "requests" }, - { name = "typing-inspection" }, ] [package.dev-dependencies] dev = [ { name = "mypy" }, { name = "pylint" }, + { name = "pyright" }, { name = "pytest" }, { name = "pytest-asyncio" }, - { name = "types-python-dateutil" }, ] [package.metadata] requires-dist = [ - { name = "eval-type-backport", specifier = ">=0.2.0" }, { name = "google-auth", specifier = ">=2.31.0,<3.0.0" }, + { name = "httpcore", specifier = ">=1.0.9" }, { name = "httpx", specifier = ">=0.28.1" }, - { name = "pydantic", specifier = ">=2.10.3" }, - { name = "python-dateutil", specifier = ">=2.8.2" }, + { name = "pydantic", specifier = ">=2.11.2" }, { name = "requests", specifier = ">=2.32.3,<3.0.0" }, - { name = "typing-inspection", specifier = ">=0.4.0" }, ] [package.metadata.requires-dev] dev = [ - { name = "mypy", specifier = "==1.14.1" }, + { name = "mypy", specifier = "==1.15.0" }, { name = "pylint", specifier = "==3.2.3" }, - { name = "pytest", specifier = ">=8.2.2,<9" }, - { name = "pytest-asyncio", specifier = ">=0.23.7,<0.24" }, - { name = "types-python-dateutil", specifier = ">=2.9.0.20240316,<3" }, + { name = "pyright", specifier = "==1.1.398" }, + { name = "pytest", specifier = ">=8.2.2,<9.0.0" }, + { name = "pytest-asyncio", specifier = ">=0.23.7,<0.24.0" }, ] [[package]] name = "mypy" -version = "1.14.1" +version = "1.15.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mypy-extensions" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b9/eb/2c92d8ea1e684440f54fa49ac5d9a5f19967b7b472a281f419e69a8d228e/mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6", size = 3216051, upload-time = "2024-12-30T16:39:07.335Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9b/7a/87ae2adb31d68402da6da1e5f30c07ea6063e9f09b5e7cfc9dfa44075e74/mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb", size = 11211002, upload-time = "2024-12-30T16:37:22.435Z" }, - { url = "https://files.pythonhosted.org/packages/e1/23/eada4c38608b444618a132be0d199b280049ded278b24cbb9d3fc59658e4/mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0", size = 10358400, upload-time = "2024-12-30T16:37:53.526Z" }, - { url = "https://files.pythonhosted.org/packages/43/c9/d6785c6f66241c62fd2992b05057f404237deaad1566545e9f144ced07f5/mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d", size = 12095172, upload-time = "2024-12-30T16:37:50.332Z" }, - { url = "https://files.pythonhosted.org/packages/c3/62/daa7e787770c83c52ce2aaf1a111eae5893de9e004743f51bfcad9e487ec/mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b", size = 12828732, upload-time = "2024-12-30T16:37:29.96Z" }, - { url = "https://files.pythonhosted.org/packages/1b/a2/5fb18318a3637f29f16f4e41340b795da14f4751ef4f51c99ff39ab62e52/mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427", size = 13012197, upload-time = "2024-12-30T16:38:05.037Z" }, - { url = "https://files.pythonhosted.org/packages/28/99/e153ce39105d164b5f02c06c35c7ba958aaff50a2babba7d080988b03fe7/mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f", size = 9780836, upload-time = "2024-12-30T16:37:19.726Z" }, - { url = "https://files.pythonhosted.org/packages/da/11/a9422850fd506edbcdc7f6090682ecceaf1f87b9dd847f9df79942da8506/mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c", size = 11120432, upload-time = "2024-12-30T16:37:11.533Z" }, - { url = "https://files.pythonhosted.org/packages/b6/9e/47e450fd39078d9c02d620545b2cb37993a8a8bdf7db3652ace2f80521ca/mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1", size = 10279515, upload-time = "2024-12-30T16:37:40.724Z" }, - { url = "https://files.pythonhosted.org/packages/01/b5/6c8d33bd0f851a7692a8bfe4ee75eb82b6983a3cf39e5e32a5d2a723f0c1/mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8", size = 12025791, upload-time = "2024-12-30T16:36:58.73Z" }, - { url = "https://files.pythonhosted.org/packages/f0/4c/e10e2c46ea37cab5c471d0ddaaa9a434dc1d28650078ac1b56c2d7b9b2e4/mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f", size = 12749203, upload-time = "2024-12-30T16:37:03.741Z" }, - { url = "https://files.pythonhosted.org/packages/88/55/beacb0c69beab2153a0f57671ec07861d27d735a0faff135a494cd4f5020/mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1", size = 12885900, upload-time = "2024-12-30T16:37:57.948Z" }, - { url = "https://files.pythonhosted.org/packages/a2/75/8c93ff7f315c4d086a2dfcde02f713004357d70a163eddb6c56a6a5eff40/mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae", size = 9777869, upload-time = "2024-12-30T16:37:33.428Z" }, - { url = "https://files.pythonhosted.org/packages/43/1b/b38c079609bb4627905b74fc6a49849835acf68547ac33d8ceb707de5f52/mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14", size = 11266668, upload-time = "2024-12-30T16:38:02.211Z" }, - { url = "https://files.pythonhosted.org/packages/6b/75/2ed0d2964c1ffc9971c729f7a544e9cd34b2cdabbe2d11afd148d7838aa2/mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9", size = 10254060, upload-time = "2024-12-30T16:37:46.131Z" }, - { url = "https://files.pythonhosted.org/packages/a1/5f/7b8051552d4da3c51bbe8fcafffd76a6823779101a2b198d80886cd8f08e/mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11", size = 11933167, upload-time = "2024-12-30T16:37:43.534Z" }, - { url = 
"https://files.pythonhosted.org/packages/04/90/f53971d3ac39d8b68bbaab9a4c6c58c8caa4d5fd3d587d16f5927eeeabe1/mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e", size = 12864341, upload-time = "2024-12-30T16:37:36.249Z" }, - { url = "https://files.pythonhosted.org/packages/03/d2/8bc0aeaaf2e88c977db41583559319f1821c069e943ada2701e86d0430b7/mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89", size = 12972991, upload-time = "2024-12-30T16:37:06.743Z" }, - { url = "https://files.pythonhosted.org/packages/6f/17/07815114b903b49b0f2cf7499f1c130e5aa459411596668267535fe9243c/mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b", size = 9879016, upload-time = "2024-12-30T16:37:15.02Z" }, - { url = "https://files.pythonhosted.org/packages/9e/15/bb6a686901f59222275ab228453de741185f9d54fecbaacec041679496c6/mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255", size = 11252097, upload-time = "2024-12-30T16:37:25.144Z" }, - { url = "https://files.pythonhosted.org/packages/f8/b3/8b0f74dfd072c802b7fa368829defdf3ee1566ba74c32a2cb2403f68024c/mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34", size = 10239728, upload-time = "2024-12-30T16:38:08.634Z" }, - { url = "https://files.pythonhosted.org/packages/c5/9b/4fd95ab20c52bb5b8c03cc49169be5905d931de17edfe4d9d2986800b52e/mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a", size = 11924965, upload-time = "2024-12-30T16:38:12.132Z" }, - { url = "https://files.pythonhosted.org/packages/56/9d/4a236b9c57f5d8f08ed346914b3f091a62dd7e19336b2b2a0d85485f82ff/mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9", size = 12867660, upload-time = "2024-12-30T16:38:17.342Z" }, - { url = "https://files.pythonhosted.org/packages/40/88/a61a5497e2f68d9027de2bb139c7bb9abaeb1be1584649fa9d807f80a338/mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd", size = 12969198, upload-time = "2024-12-30T16:38:32.839Z" }, - { url = "https://files.pythonhosted.org/packages/54/da/3d6fc5d92d324701b0c23fb413c853892bfe0e1dbe06c9138037d459756b/mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107", size = 9885276, upload-time = "2024-12-30T16:38:20.828Z" }, - { url = "https://files.pythonhosted.org/packages/ca/1f/186d133ae2514633f8558e78cd658070ba686c0e9275c5a5c24a1e1f0d67/mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35", size = 11200493, upload-time = "2024-12-30T16:38:26.935Z" }, - { url = "https://files.pythonhosted.org/packages/af/fc/4842485d034e38a4646cccd1369f6b1ccd7bc86989c52770d75d719a9941/mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc", size = 10357702, upload-time = "2024-12-30T16:38:50.623Z" }, - { url = 
"https://files.pythonhosted.org/packages/b4/e6/457b83f2d701e23869cfec013a48a12638f75b9d37612a9ddf99072c1051/mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9", size = 12091104, upload-time = "2024-12-30T16:38:53.735Z" }, - { url = "https://files.pythonhosted.org/packages/f1/bf/76a569158db678fee59f4fd30b8e7a0d75bcbaeef49edd882a0d63af6d66/mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb", size = 12830167, upload-time = "2024-12-30T16:38:56.437Z" }, - { url = "https://files.pythonhosted.org/packages/43/bc/0bc6b694b3103de9fed61867f1c8bd33336b913d16831431e7cb48ef1c92/mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60", size = 13013834, upload-time = "2024-12-30T16:38:59.204Z" }, - { url = "https://files.pythonhosted.org/packages/b0/79/5f5ec47849b6df1e6943d5fd8e6632fbfc04b4fd4acfa5a5a9535d11b4e2/mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c", size = 9781231, upload-time = "2024-12-30T16:39:05.124Z" }, - { url = "https://files.pythonhosted.org/packages/a0/b5/32dd67b69a16d088e533962e5044e51004176a9952419de0370cdaead0f8/mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1", size = 2752905, upload-time = "2024-12-30T16:38:42.021Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/ce/43/d5e49a86afa64bd3839ea0d5b9c7103487007d728e1293f52525d6d5486a/mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43", size = 3239717, upload-time = "2025-02-05T03:50:34.655Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/f8/65a7ce8d0e09b6329ad0c8d40330d100ea343bd4dd04c4f8ae26462d0a17/mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13", size = 10738433, upload-time = "2025-02-05T03:49:29.145Z" }, + { url = "https://files.pythonhosted.org/packages/b4/95/9c0ecb8eacfe048583706249439ff52105b3f552ea9c4024166c03224270/mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559", size = 9861472, upload-time = "2025-02-05T03:49:16.986Z" }, + { url = "https://files.pythonhosted.org/packages/84/09/9ec95e982e282e20c0d5407bc65031dfd0f0f8ecc66b69538296e06fcbee/mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b", size = 11611424, upload-time = "2025-02-05T03:49:46.908Z" }, + { url = "https://files.pythonhosted.org/packages/78/13/f7d14e55865036a1e6a0a69580c240f43bc1f37407fe9235c0d4ef25ffb0/mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3", size = 12365450, upload-time = "2025-02-05T03:50:05.89Z" }, + { url = "https://files.pythonhosted.org/packages/48/e1/301a73852d40c241e915ac6d7bcd7fedd47d519246db2d7b86b9d7e7a0cb/mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b", size = 12551765, upload-time = "2025-02-05T03:49:33.56Z" }, + { url = 
"https://files.pythonhosted.org/packages/77/ba/c37bc323ae5fe7f3f15a28e06ab012cd0b7552886118943e90b15af31195/mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828", size = 9274701, upload-time = "2025-02-05T03:49:38.981Z" }, + { url = "https://files.pythonhosted.org/packages/03/bc/f6339726c627bd7ca1ce0fa56c9ae2d0144604a319e0e339bdadafbbb599/mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f", size = 10662338, upload-time = "2025-02-05T03:50:17.287Z" }, + { url = "https://files.pythonhosted.org/packages/e2/90/8dcf506ca1a09b0d17555cc00cd69aee402c203911410136cd716559efe7/mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5", size = 9787540, upload-time = "2025-02-05T03:49:51.21Z" }, + { url = "https://files.pythonhosted.org/packages/05/05/a10f9479681e5da09ef2f9426f650d7b550d4bafbef683b69aad1ba87457/mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e", size = 11538051, upload-time = "2025-02-05T03:50:20.885Z" }, + { url = "https://files.pythonhosted.org/packages/e9/9a/1f7d18b30edd57441a6411fcbc0c6869448d1a4bacbaee60656ac0fc29c8/mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c", size = 12286751, upload-time = "2025-02-05T03:49:42.408Z" }, + { url = "https://files.pythonhosted.org/packages/72/af/19ff499b6f1dafcaf56f9881f7a965ac2f474f69f6f618b5175b044299f5/mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f", size = 12421783, upload-time = "2025-02-05T03:49:07.707Z" }, + { url = "https://files.pythonhosted.org/packages/96/39/11b57431a1f686c1aed54bf794870efe0f6aeca11aca281a0bd87a5ad42c/mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f", size = 9265618, upload-time = "2025-02-05T03:49:54.581Z" }, + { url = "https://files.pythonhosted.org/packages/98/3a/03c74331c5eb8bd025734e04c9840532226775c47a2c39b56a0c8d4f128d/mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd", size = 10793981, upload-time = "2025-02-05T03:50:28.25Z" }, + { url = "https://files.pythonhosted.org/packages/f0/1a/41759b18f2cfd568848a37c89030aeb03534411eef981df621d8fad08a1d/mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f", size = 9749175, upload-time = "2025-02-05T03:50:13.411Z" }, + { url = "https://files.pythonhosted.org/packages/12/7e/873481abf1ef112c582db832740f4c11b2bfa510e829d6da29b0ab8c3f9c/mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464", size = 11455675, upload-time = "2025-02-05T03:50:31.421Z" }, + { url = "https://files.pythonhosted.org/packages/b3/d0/92ae4cde706923a2d3f2d6c39629134063ff64b9dedca9c1388363da072d/mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee", size = 12410020, upload-time = 
"2025-02-05T03:48:48.705Z" }, + { url = "https://files.pythonhosted.org/packages/46/8b/df49974b337cce35f828ba6fda228152d6db45fed4c86ba56ffe442434fd/mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e", size = 12498582, upload-time = "2025-02-05T03:49:03.628Z" }, + { url = "https://files.pythonhosted.org/packages/13/50/da5203fcf6c53044a0b699939f31075c45ae8a4cadf538a9069b165c1050/mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22", size = 9366614, upload-time = "2025-02-05T03:50:00.313Z" }, + { url = "https://files.pythonhosted.org/packages/6a/9b/fd2e05d6ffff24d912f150b87db9e364fa8282045c875654ce7e32fffa66/mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445", size = 10788592, upload-time = "2025-02-05T03:48:55.789Z" }, + { url = "https://files.pythonhosted.org/packages/74/37/b246d711c28a03ead1fd906bbc7106659aed7c089d55fe40dd58db812628/mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d", size = 9753611, upload-time = "2025-02-05T03:48:44.581Z" }, + { url = "https://files.pythonhosted.org/packages/a6/ac/395808a92e10cfdac8003c3de9a2ab6dc7cde6c0d2a4df3df1b815ffd067/mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5", size = 11438443, upload-time = "2025-02-05T03:49:25.514Z" }, + { url = "https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541, upload-time = "2025-02-05T03:49:57.623Z" }, + { url = "https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348, upload-time = "2025-02-05T03:48:52.361Z" }, + { url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648, upload-time = "2025-02-05T03:49:11.395Z" }, + { url = "https://files.pythonhosted.org/packages/5a/fa/79cf41a55b682794abe71372151dbbf856e3008f6767057229e6649d294a/mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078", size = 10737129, upload-time = "2025-02-05T03:50:24.509Z" }, + { url = "https://files.pythonhosted.org/packages/d3/33/dd8feb2597d648de29e3da0a8bf4e1afbda472964d2a4a0052203a6f3594/mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba", size = 9856335, upload-time = "2025-02-05T03:49:36.398Z" }, + { url = "https://files.pythonhosted.org/packages/e4/b5/74508959c1b06b96674b364ffeb7ae5802646b32929b7701fc6b18447592/mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5", size = 11611935, upload-time = 
"2025-02-05T03:49:14.154Z" }, + { url = "https://files.pythonhosted.org/packages/6c/53/da61b9d9973efcd6507183fdad96606996191657fe79701b2c818714d573/mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b", size = 12365827, upload-time = "2025-02-05T03:48:59.458Z" }, + { url = "https://files.pythonhosted.org/packages/c1/72/965bd9ee89540c79a25778cc080c7e6ef40aa1eeac4d52cec7eae6eb5228/mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2", size = 12541924, upload-time = "2025-02-05T03:50:03.12Z" }, + { url = "https://files.pythonhosted.org/packages/46/d0/f41645c2eb263e6c77ada7d76f894c580c9ddb20d77f0c24d34273a4dab2/mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980", size = 9271176, upload-time = "2025-02-05T03:50:10.86Z" }, + { url = "https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777, upload-time = "2025-02-05T03:50:08.348Z" }, ] [[package]] @@ -403,6 +392,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, ] +[[package]] +name = "nodeenv" +version = "1.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, +] + [[package]] name = "packaging" version = "25.0" @@ -417,6 +415,7 @@ name = "platformdirs" version = "4.4.0" source = { registry = "https://pypi.org/simple" } resolution-markers = [ + "python_full_version < '3.10'", ] sdist = { url = "https://files.pythonhosted.org/packages/23/e8/21db9c9987b0e728855bd57bff6984f67952bea55d6f75e055c46b5383e8/platformdirs-4.4.0.tar.gz", hash = "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf", size = 21634, upload-time = "2025-08-26T14:32:04.268Z" } wheels = [ @@ -632,15 +631,30 @@ dependencies = [ { name = "dill" }, { name = "isort" }, { name = "mccabe" }, + { name = "platformdirs", version = "4.4.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, { name = "platformdirs", version = "4.5.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "tomlkit" }, + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/9a/e9/60280b14cc1012794120345ce378504cf17409e38cd88f455dc24e0ad6b5/pylint-3.2.3.tar.gz", hash = 
"sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60", size = 1506739, upload-time = "2024-06-06T14:19:17.955Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/50/d3/d346f779cbc9384d8b805a7557b5f2b8ee9f842bffebec9fc6364d6ae183/pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8", size = 519244, upload-time = "2024-06-06T14:19:13.228Z" }, ] +[[package]] +name = "pyright" +version = "1.1.398" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nodeenv" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/24/d6/48740f1d029e9fc4194880d1ad03dcf0ba3a8f802e0e166b8f63350b3584/pyright-1.1.398.tar.gz", hash = "sha256:357a13edd9be8082dc73be51190913e475fa41a6efb6ec0d4b7aab3bc11638d8", size = 3892675, upload-time = "2025-03-26T10:06:06.063Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/58/e0/5283593f61b3c525d6d7e94cfb6b3ded20b3df66e953acaf7bb4f23b3f6e/pyright-1.1.398-py3-none-any.whl", hash = "sha256:0a70bfd007d9ea7de1cf9740e1ad1a40a122592cfe22a3f6791b06162ad08753", size = 5780235, upload-time = "2025-03-26T10:06:03.994Z" }, +] + [[package]] name = "pytest" version = "8.4.2" @@ -648,6 +662,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig", version = "2.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, { name = "iniconfig", version = "2.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, { name = "packaging" }, { name = "pluggy" }, @@ -671,18 +686,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ee/82/62e2d63639ecb0fbe8a7ee59ef0bc69a4669ec50f6d3459f74ad4e4189a2/pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2", size = 17663, upload-time = "2024-07-17T17:39:32.478Z" }, ] -[[package]] -name = "python-dateutil" -version = "2.9.0.post0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "six" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, -] - [[package]] name = "requests" version = "2.32.5" @@ -710,15 +713,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, ] -[[package]] -name = "six" -version = "1.17.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = 
"sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, -] - [[package]] name = "tomli" version = "2.3.0" @@ -777,15 +771,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/bd/75/8539d011f6be8e29f339c42e633aae3cb73bffa95dd0f9adec09b9c58e85/tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0", size = 38901, upload-time = "2025-06-05T07:13:43.546Z" }, ] -[[package]] -name = "types-python-dateutil" -version = "2.9.0.20251115" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/6a/36/06d01fb52c0d57e9ad0c237654990920fa41195e4b3d640830dabf9eeb2f/types_python_dateutil-2.9.0.20251115.tar.gz", hash = "sha256:8a47f2c3920f52a994056b8786309b43143faa5a64d4cbb2722d6addabdf1a58", size = 16363, upload-time = "2025-11-15T03:00:13.717Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/43/0b/56961d3ba517ed0df9b3a27bfda6514f3d01b28d499d1bce9068cfe4edd1/types_python_dateutil-2.9.0.20251115-py3-none-any.whl", hash = "sha256:9cf9c1c582019753b8639a081deefd7e044b9fa36bd8217f565c6c4e36ee0624", size = 18251, upload-time = "2025-11-15T03:00:12.317Z" }, -] - [[package]] name = "typing-extensions" version = "4.15.0" diff --git a/scripts/publish.sh b/scripts/publish.sh index c41f3efb..c35748f3 100755 --- a/scripts/publish.sh +++ b/scripts/publish.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -export UV_PUBLISH_TOKEN=${PYPI_TOKEN} -uv run python scripts/prepare_readme.py -- uv build -uv publish +uv run python scripts/prepare_readme.py + +uv build +uv publish --token $PYPI_TOKEN diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 851d6fbe..06284209 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.10.0" +__version__: str = "1.11.0" __openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.687.13" -__user_agent__: str = "speakeasy-sdk/python 1.10.0 2.687.13 1.0.0 mistralai" +__gen_version__: str = "2.789.5" +__user_agent__: str = "speakeasy-sdk/python 1.11.0 2.789.5 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/accesses.py b/src/mistralai/accesses.py index dd8ffade..ac166838 100644 --- a/src/mistralai/accesses.py +++ b/src/mistralai/accesses.py @@ -58,6 +58,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -74,7 +75,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -147,6 +148,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -163,7 +165,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_list_v1", - oauth2_scopes=[], + 
oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -253,6 +255,7 @@ def update_or_create( get_serialized_body=lambda: utils.serialize_request_body( request.sharing_in, False, False, "json", models.SharingIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -269,7 +272,7 @@ def update_or_create( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_create_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -359,6 +362,7 @@ async def update_or_create_async( get_serialized_body=lambda: utils.serialize_request_body( request.sharing_in, False, False, "json", models.SharingIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -375,7 +379,7 @@ async def update_or_create_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_create_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -462,6 +466,7 @@ def delete( get_serialized_body=lambda: utils.serialize_request_body( request.sharing_delete, False, False, "json", models.SharingDelete ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -478,7 +483,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -565,6 +570,7 @@ async def delete_async( get_serialized_body=lambda: utils.serialize_request_body( request.sharing_delete, False, False, "json", models.SharingDelete ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -581,7 +587,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index 173921fa..656f7b4c 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -131,6 +131,7 @@ def complete( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentsCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -147,7 +148,7 @@ def complete( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -293,6 +294,7 @@ async def complete_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentsCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -309,7 +311,7 @@ async def complete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -457,6 +459,7 @@ def stream( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentsCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -473,7 +476,7 @@ def stream( config=self.sdk_configuration, base_url=base_url or "", 
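
The `allow_empty_value=None` argument now threaded through every operation feeds the allow-list handled in the queryparams.py hunk above: parameters named in it are serialized as `?name=` even when their value is None, "" or []. A minimal sketch of that behaviour, with illustrative names (not the SDK's actual helper):

```python
# Minimal sketch of the allow_empty_value semantics; names are illustrative,
# this is not the SDK's actual helper.
from typing import Any, Dict, List, Optional


def build_query(
    params: Dict[str, Any],
    allow_empty_value: Optional[List[str]] = None,
) -> Dict[str, List[str]]:
    allow_empty = set(allow_empty_value or [])
    out: Dict[str, List[str]] = {}
    for name, value in params.items():
        if value is None or value == "" or value == []:
            if name in allow_empty:
                out[name] = [""]  # serialized as `?name=`
            # otherwise the empty parameter is dropped entirely
            continue
        out[name] = [str(value)]
    return out


print(build_query({"page": 0, "filter": ""}))
# {'page': ['0']}
print(build_query({"page": 0, "filter": ""}, allow_empty_value=["filter"]))
# {'page': ['0'], 'filter': ['']}
```
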
operation_id="stream_agents", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -629,6 +632,7 @@ async def stream_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentsCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -645,7 +649,7 @@ async def stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_agents", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py index 6b62ddae..c9a32aa1 100644 --- a/src/mistralai/basesdk.py +++ b/src/mistralai/basesdk.py @@ -60,6 +60,7 @@ def _build_request_async( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: client = self.sdk_configuration.async_client return self._build_request_with_client( @@ -80,6 +81,7 @@ def _build_request_async( get_serialized_body, url_override, http_headers, + allow_empty_value, ) def _build_request( @@ -102,6 +104,7 @@ def _build_request( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: client = self.sdk_configuration.client return self._build_request_with_client( @@ -122,6 +125,7 @@ def _build_request( get_serialized_body, url_override, http_headers, + allow_empty_value, ) def _build_request_with_client( @@ -145,6 +149,7 @@ def _build_request_with_client( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: query_params = {} @@ -160,6 +165,7 @@ def _build_request_with_client( query_params = utils.get_query_params( request if request_has_query_params else None, _globals if request_has_query_params else None, + allow_empty_value, ) else: # Pick up the query parameter from the override so they can be diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 6a8058f7..fa4c4ed8 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -212,6 +212,7 @@ def complete( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -228,7 +229,7 @@ def complete( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -373,6 +374,7 @@ async def complete_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -389,7 +391,7 @@ async def complete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -546,6 +548,7 @@ def stream( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -562,7 +565,7 @@ 
def stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -727,6 +730,7 @@ async def stream_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -743,7 +747,7 @@ async def stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py index cd6a9415..d1a3c5e5 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -65,6 +65,7 @@ def moderate( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -81,7 +82,7 @@ def moderate( config=self.sdk_configuration, base_url=base_url or "", operation_id="moderations_v1_moderations_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -161,6 +162,7 @@ async def moderate_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -177,7 +179,7 @@ async def moderate_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="moderations_v1_moderations_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -257,6 +259,7 @@ def moderate_chat( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatModerationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -273,7 +276,7 @@ def moderate_chat( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_moderations_v1_chat_moderations_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -353,6 +356,7 @@ async def moderate_chat_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatModerationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -369,7 +373,7 @@ async def moderate_chat_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_moderations_v1_chat_moderations_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -449,6 +453,7 @@ def classify( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -465,7 +470,7 @@ def classify( config=self.sdk_configuration, base_url=base_url or "", operation_id="classifications_v1_classifications_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -545,6 +550,7 @@ async def classify_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ClassificationRequest ), + 
allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -561,7 +567,7 @@ async def classify_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="classifications_v1_classifications_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -638,6 +644,7 @@ def classify_chat( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -654,7 +661,7 @@ def classify_chat( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_classifications_v1_chat_classifications_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -731,6 +738,7 @@ async def classify_chat_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -747,7 +755,7 @@ async def classify_chat_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_classifications_v1_chat_classifications_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index a7d58abd..a376c279 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -309,6 +309,7 @@ def start( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ConversationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -325,7 +326,7 @@ def start( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_start", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -441,6 +442,7 @@ async def start_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ConversationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -457,7 +459,7 @@ async def start_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_start", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -536,6 +538,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -552,7 +555,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_list", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -631,6 +634,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -647,7 +651,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_list", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -720,6 +724,7 @@ def get( accept_header_value="application/json", 
http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -736,7 +741,7 @@ def get( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -811,6 +816,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -827,7 +833,7 @@ async def get_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -902,6 +908,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -918,7 +925,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_delete", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -991,6 +998,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1007,7 +1015,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_delete", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1110,6 +1118,7 @@ def append( "json", models.ConversationAppendRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1126,7 +1135,7 @@ def append( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_append", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1229,6 +1238,7 @@ async def append_async( "json", models.ConversationAppendRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1245,7 +1255,7 @@ async def append_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_append", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1318,6 +1328,7 @@ def get_history( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1334,7 +1345,7 @@ def get_history( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_history", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1407,6 +1418,7 @@ async def get_history_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1423,7 +1435,7 @@ async def get_history_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_history", - oauth2_scopes=[], + oauth2_scopes=None, 
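
Each operation also switches `oauth2_scopes` from `[]` to `None`, which lets a security hook distinguish "no OAuth2 flow requested" from "OAuth2 with an empty scope list". That reading is an assumption; a sketch of the distinction with a hypothetical hook:

```python
# Hypothetical security hook, illustrating why None and [] are not the same
# for oauth2_scopes; the hook name and logic are assumptions, not SDK code.
from typing import List, Optional


def wants_oauth2(oauth2_scopes: Optional[List[str]]) -> bool:
    # None -> the operation declares no OAuth2 requirement at all
    # []   -> OAuth2 is required, but with no specific scopes
    return oauth2_scopes is not None


print(wants_oauth2(None))  # False
print(wants_oauth2([]))    # True
```
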
security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1496,6 +1508,7 @@ def get_messages( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1512,7 +1525,7 @@ def get_messages( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_messages", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1585,6 +1598,7 @@ async def get_messages_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1601,7 +1615,7 @@ async def get_messages_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_messages", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1713,6 +1727,7 @@ def restart( "json", models.ConversationRestartRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1729,7 +1744,7 @@ def restart( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_restart", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1841,6 +1856,7 @@ async def restart_async( "json", models.ConversationRestartRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1857,7 +1873,7 @@ async def restart_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_restart", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1982,6 +1998,7 @@ def start_stream( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ConversationStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1998,7 +2015,7 @@ def start_stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_start_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -2130,6 +2147,7 @@ async def start_stream_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ConversationStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2146,7 +2164,7 @@ async def start_stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_start_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -2256,6 +2274,7 @@ def append_stream( "json", models.ConversationAppendStreamRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2272,7 +2291,7 @@ def append_stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_append_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -2382,6 +2401,7 @@ async def append_stream_async( "json", models.ConversationAppendStreamRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2398,7 +2418,7 @@ 
async def append_stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_append_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -2517,6 +2537,7 @@ def restart_stream( "json", models.ConversationRestartStreamRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2533,7 +2554,7 @@ def restart_stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_restart_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -2652,6 +2673,7 @@ async def restart_stream_async( "json", models.ConversationRestartStreamRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2668,7 +2690,7 @@ async def restart_stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_restart_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/documents.py b/src/mistralai/documents.py index c1497bff..50a177f7 100644 --- a/src/mistralai/documents.py +++ b/src/mistralai/documents.py @@ -76,6 +76,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -92,7 +93,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -183,6 +184,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -199,7 +201,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -241,7 +243,15 @@ def upload( Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search :param library_id: - :param file: The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param file: The File object (not file name) to be uploaded. 
+ To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -284,6 +294,7 @@ def upload( "multipart", models.LibrariesDocumentsUploadV1DocumentUpload, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -300,7 +311,7 @@ def upload( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_upload_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -342,7 +353,15 @@ async def upload_async( Given a library, upload a new document to that library. It is queued for processing; its status will change once it has been processed. The processing has to be completed in order for the document to be discoverable in the library search :param library_id: - :param file: The File object (not file name) to be uploaded. + :param file: The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -385,6 +404,7 @@ async def upload_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", models.LibrariesDocumentsUploadV1DocumentUpload, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -401,7 +421,7 @@ async def upload_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_upload_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -477,6 +497,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -493,7 +514,7 @@ def get( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -569,6 +590,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -585,7 +607,7 @@ async def get_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -678,6 +700,7 @@ def update( "json", models.DocumentUpdateIn, ), + allow_empty_value=None, 
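Reviewer note: the reflowed `:param file:` docstring above documents the raw multipart convention. A minimal SDK-level sketch of the same upload; the accessor path `client.beta.libraries.documents.upload` and the dict file shape are assumptions, not confirmed by this diff:

```python
# Sketch only: accessor path and file shape are assumed, not part of this diff.
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

with open("path/to/your/file.jsonl", "rb") as f:
    # Equivalent of `file=@path/to/your/file.jsonl;filename=custom_name.jsonl`:
    # the multipart file name is whatever file_name is set to here.
    doc = client.beta.libraries.documents.upload(
        library_id="your-library-id",  # hypothetical placeholder
        file={"file_name": "custom_name.jsonl", "content": f},
    )

print(doc)  # the document is queued; poll its status until it is processed
```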
timeout_ms=timeout_ms, ) @@ -694,7 +717,7 @@ def update( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_update_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -787,6 +810,7 @@ async def update_async( "json", models.DocumentUpdateIn, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -803,7 +827,7 @@ async def update_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_update_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -879,6 +903,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -895,7 +920,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -971,6 +996,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -987,7 +1013,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1063,6 +1089,7 @@ def text_content( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1079,7 +1106,7 @@ def text_content( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_text_content_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1155,6 +1182,7 @@ async def text_content_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1171,7 +1199,7 @@ async def text_content_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_text_content_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1247,6 +1275,7 @@ def status( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1263,7 +1292,7 @@ def status( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_status_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1339,6 +1368,7 @@ async def status_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1355,7 +1385,7 @@ async def status_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_status_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( 
self.sdk_configuration.security, models.Security ), @@ -1431,6 +1461,7 @@ def get_signed_url( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1447,7 +1478,7 @@ def get_signed_url( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_signed_url_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1523,6 +1554,7 @@ async def get_signed_url_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1539,7 +1571,7 @@ async def get_signed_url_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_signed_url_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1615,6 +1647,7 @@ def extracted_text_signed_url( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1631,7 +1664,7 @@ def extracted_text_signed_url( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_extracted_text_signed_url_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1707,6 +1740,7 @@ async def extracted_text_signed_url_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1723,7 +1757,7 @@ async def extracted_text_signed_url_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_extracted_text_signed_url_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1799,6 +1833,7 @@ def reprocess( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1815,7 +1850,7 @@ def reprocess( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_reprocess_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1891,6 +1926,7 @@ async def reprocess_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1907,7 +1943,7 @@ async def reprocess_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_reprocess_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index 76e8e719..cf250355 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -75,6 +75,7 @@ def create( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.EmbeddingRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -91,7 +92,7 @@ def create( config=self.sdk_configuration, base_url=base_url or "", 
operation_id="embeddings_v1_embeddings_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -181,6 +182,7 @@ async def create_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.EmbeddingRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -197,7 +199,7 @@ async def create_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="embeddings_v1_embeddings_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/files.py b/src/mistralai/files.py index ae4eb779..4189470a 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -31,7 +31,15 @@ def upload( Please contact us if you need to increase these storage limits. - :param file: The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param file: The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` :param purpose: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -73,6 +81,7 @@ def upload( "multipart", models.FilesAPIRoutesUploadFileMultiPartBodyParams, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -89,7 +98,7 @@ def upload( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_upload_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -128,7 +137,15 @@ async def upload_async( Please contact us if you need to increase these storage limits. - :param file: The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param file: The File object (not file name) to be uploaded. 
+ To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` :param purpose: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -170,6 +187,7 @@ async def upload_async( "multipart", models.FilesAPIRoutesUploadFileMultiPartBodyParams, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -186,7 +204,7 @@ async def upload_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_upload_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -271,6 +289,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -287,7 +306,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_list_files", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -372,6 +391,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -388,7 +408,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_list_files", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -455,6 +475,7 @@ def retrieve( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -471,7 +492,7 @@ def retrieve( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_retrieve_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -538,6 +559,7 @@ async def retrieve_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -554,7 +576,7 @@ async def retrieve_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_retrieve_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -621,6 +643,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -637,7 +660,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_delete_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -704,6 +727,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -720,7 +744,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_delete_file", 
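Reviewer note: the same docstring reflow appears in `files.py`. In SDK terms the curl convention maps onto the `file` argument of `client.files.upload`; a sketch following the dict file shape used in this repo's README examples:

```python
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

with open("path/to/your/file.jsonl", "rb") as f:
    uploaded = client.files.upload(
        # Passing file_name mirrors `;filename=custom_name.jsonl`; reuse the
        # original name to mirror the plain `file=@path/to/your/file.jsonl` form.
        file={"file_name": "custom_name.jsonl", "content": f},
        purpose="fine-tune",
    )

print(uploaded.id)
```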
- oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -787,6 +811,7 @@ def download( accept_header_value="application/octet-stream", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -803,7 +828,7 @@ def download( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_download_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -872,6 +897,7 @@ async def download_async( accept_header_value="application/octet-stream", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -888,7 +914,7 @@ async def download_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_download_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -958,6 +984,7 @@ def get_signed_url( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -974,7 +1001,7 @@ def get_signed_url( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_get_signed_url", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1042,6 +1069,7 @@ async def get_signed_url_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1058,7 +1086,7 @@ async def get_signed_url_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_get_signed_url", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index 49bdb32e..a341149d 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -96,6 +96,7 @@ def complete( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -112,7 +113,7 @@ def complete( config=self.sdk_configuration, base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -223,6 +224,7 @@ async def complete_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -239,7 +241,7 @@ async def complete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -350,6 +352,7 @@ def stream( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -366,7 +369,7 @@ def stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_fim", - 
oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -485,6 +488,7 @@ async def stream_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -501,7 +505,7 @@ async def stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_fim", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/httpclient.py b/src/mistralai/httpclient.py index 47b052cb..89560b56 100644 --- a/src/mistralai/httpclient.py +++ b/src/mistralai/httpclient.py @@ -107,7 +107,6 @@ def close_clients( # to them from the owning SDK instance and they can be reaped. owner.client = None owner.async_client = None - if sync_client is not None and not sync_client_supplied: try: sync_client.close() diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index af6364cb..6d81920c 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -84,6 +84,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -100,7 +101,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -194,6 +195,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -210,7 +212,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -335,6 +337,7 @@ def create( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.JobIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -351,7 +354,7 @@ def create( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -478,6 +481,7 @@ async def create_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.JobIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -494,7 +498,7 @@ async def create_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -563,6 +567,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -579,7 +584,7 @@ def get( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( 
self.sdk_configuration.security, models.Security ), @@ -648,6 +653,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -664,7 +670,7 @@ async def get_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -733,6 +739,7 @@ def cancel( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -749,7 +756,7 @@ def cancel( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -818,6 +825,7 @@ async def cancel_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -834,7 +842,7 @@ async def cancel_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -903,6 +911,7 @@ def start( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -919,7 +928,7 @@ def start( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -988,6 +997,7 @@ async def start_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1004,7 +1014,7 @@ async def start_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/libraries.py b/src/mistralai/libraries.py index e9f19047..32648937 100644 --- a/src/mistralai/libraries.py +++ b/src/mistralai/libraries.py @@ -70,6 +70,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -86,7 +87,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -146,6 +147,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -162,7 +164,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( 
self.sdk_configuration.security, models.Security ), @@ -238,6 +240,7 @@ def create( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.LibraryIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -254,7 +257,7 @@ def create( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_create_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -336,6 +339,7 @@ async def create_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.LibraryIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -352,7 +356,7 @@ async def create_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_create_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -425,6 +429,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -441,7 +446,7 @@ def get( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_get_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -514,6 +519,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -530,7 +536,7 @@ async def get_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_get_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -603,6 +609,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -619,7 +626,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -692,6 +699,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -708,7 +716,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -792,6 +800,7 @@ def update( get_serialized_body=lambda: utils.serialize_request_body( request.library_in_update, False, False, "json", models.LibraryInUpdate ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -808,7 +817,7 @@ def update( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_update_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -892,6 +901,7 @@ async def update_async( get_serialized_body=lambda: utils.serialize_request_body( request.library_in_update, False, False, "json", models.LibraryInUpdate ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -908,7 +918,7 @@ async def update_async( 
config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_update_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/mistral_agents.py b/src/mistralai/mistral_agents.py index 0d9ad0b7..235f6a5d 100644 --- a/src/mistralai/mistral_agents.py +++ b/src/mistralai/mistral_agents.py @@ -93,6 +93,7 @@ def create( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentCreationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -109,7 +110,7 @@ def create( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_create", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -217,6 +218,7 @@ async def create_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentCreationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -233,7 +235,7 @@ async def create_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_create", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -324,6 +326,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -340,7 +343,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_list", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -431,6 +434,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -447,7 +451,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_list", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -523,6 +527,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -539,7 +544,7 @@ def get( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -615,6 +620,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -631,7 +637,7 @@ async def get_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -751,6 +757,7 @@ def update( "json", models.AgentUpdateRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -767,7 +774,7 @@ def update( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_update", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( 
self.sdk_configuration.security, models.Security ), @@ -887,6 +894,7 @@ async def update_async( "json", models.AgentUpdateRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -903,7 +911,7 @@ async def update_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_update", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -974,6 +982,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -990,7 +999,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_delete", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1061,6 +1070,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1077,7 +1087,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_delete", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1153,6 +1163,7 @@ def update_version( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1169,7 +1180,7 @@ def update_version( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_update_version", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1245,6 +1256,7 @@ async def update_version_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1261,7 +1273,7 @@ async def update_version_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_update_version", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py index 6c213756..e59662ea 100644 --- a/src/mistralai/mistral_jobs.py +++ b/src/mistralai/mistral_jobs.py @@ -78,6 +78,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -94,7 +95,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_jobs", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -182,6 +183,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -198,7 +200,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_jobs", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -283,6 +285,7 @@ def create( 
get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.BatchJobIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -299,7 +302,7 @@ def create( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_create_batch_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -384,6 +387,7 @@ async def create_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.BatchJobIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -400,7 +404,7 @@ async def create_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_create_batch_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -467,6 +471,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -483,7 +488,7 @@ def get( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -550,6 +555,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -566,7 +572,7 @@ async def get_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -633,6 +639,7 @@ def cancel( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -649,7 +656,7 @@ def cancel( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_cancel_batch_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -716,6 +723,7 @@ async def cancel_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -732,7 +740,7 @@ async def cancel_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_cancel_batch_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/models/agent.py b/src/mistralai/models/agent.py index 5d0b39fa..eb30905b 100644 --- a/src/mistralai/models/agent.py +++ b/src/mistralai/models/agent.py @@ -42,7 +42,7 @@ ] -AgentObject = Literal["agent"] +AgentObject = Literal["agent",] class AgentTypedDict(TypedDict): diff --git a/src/mistralai/models/agentconversation.py b/src/mistralai/models/agentconversation.py index 7fa3dfe9..625fb4fc 100644 --- a/src/mistralai/models/agentconversation.py +++ b/src/mistralai/models/agentconversation.py @@ -8,7 +8,7 @@ from typing_extensions import NotRequired, TypedDict -AgentConversationObject = Literal["conversation"] +AgentConversationObject = Literal["conversation",] class 
AgentConversationTypedDict(TypedDict): diff --git a/src/mistralai/models/agenthandoffdoneevent.py b/src/mistralai/models/agenthandoffdoneevent.py index fa545a02..1cdbf456 100644 --- a/src/mistralai/models/agenthandoffdoneevent.py +++ b/src/mistralai/models/agenthandoffdoneevent.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -AgentHandoffDoneEventType = Literal["agent.handoff.done"] +AgentHandoffDoneEventType = Literal["agent.handoff.done",] class AgentHandoffDoneEventTypedDict(TypedDict): diff --git a/src/mistralai/models/agenthandoffentry.py b/src/mistralai/models/agenthandoffentry.py index 44bfe0f2..66136256 100644 --- a/src/mistralai/models/agenthandoffentry.py +++ b/src/mistralai/models/agenthandoffentry.py @@ -8,9 +8,10 @@ from typing_extensions import NotRequired, TypedDict -AgentHandoffEntryObject = Literal["entry"] +AgentHandoffEntryObject = Literal["entry",] -AgentHandoffEntryType = Literal["agent.handoff"] + +AgentHandoffEntryType = Literal["agent.handoff",] class AgentHandoffEntryTypedDict(TypedDict): diff --git a/src/mistralai/models/agenthandoffstartedevent.py b/src/mistralai/models/agenthandoffstartedevent.py index 9033a0a9..11bfa918 100644 --- a/src/mistralai/models/agenthandoffstartedevent.py +++ b/src/mistralai/models/agenthandoffstartedevent.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -AgentHandoffStartedEventType = Literal["agent.handoff.started"] +AgentHandoffStartedEventType = Literal["agent.handoff.started",] class AgentHandoffStartedEventTypedDict(TypedDict): diff --git a/src/mistralai/models/archiveftmodelout.py b/src/mistralai/models/archiveftmodelout.py index cff27c4e..0f753cfc 100644 --- a/src/mistralai/models/archiveftmodelout.py +++ b/src/mistralai/models/archiveftmodelout.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -ArchiveFTModelOutObject = Literal["model"] +ArchiveFTModelOutObject = Literal["model",] class ArchiveFTModelOutTypedDict(TypedDict): diff --git a/src/mistralai/models/assistantmessage.py b/src/mistralai/models/assistantmessage.py index 18841a72..a38a10c4 100644 --- a/src/mistralai/models/assistantmessage.py +++ b/src/mistralai/models/assistantmessage.py @@ -19,7 +19,7 @@ ) -AssistantMessageRole = Literal["assistant"] +AssistantMessageRole = Literal["assistant",] class AssistantMessageTypedDict(TypedDict): diff --git a/src/mistralai/models/audiochunk.py b/src/mistralai/models/audiochunk.py index 2780570a..64fc43ff 100644 --- a/src/mistralai/models/audiochunk.py +++ b/src/mistralai/models/audiochunk.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -AudioChunkType = Literal["input_audio"] +AudioChunkType = Literal["input_audio",] class AudioChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/basemodelcard.py b/src/mistralai/models/basemodelcard.py index a4a061ff..706841b7 100644 --- a/src/mistralai/models/basemodelcard.py +++ b/src/mistralai/models/basemodelcard.py @@ -12,7 +12,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -BaseModelCardType = Literal["base"] +BaseModelCardType = Literal["base",] class BaseModelCardTypedDict(TypedDict): diff --git a/src/mistralai/models/batchjobout.py b/src/mistralai/models/batchjobout.py index 88304313..3d9f0dba 100644 --- a/src/mistralai/models/batchjobout.py +++ b/src/mistralai/models/batchjobout.py @@ -9,7 +9,7 @@ from typing_extensions import NotRequired, TypedDict -BatchJobOutObject = Literal["batch"] +BatchJobOutObject = Literal["batch",] class BatchJobOutTypedDict(TypedDict): 
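Reviewer note: the `Literal["x"]` → `Literal["x",]` rewrites throughout the model files are cosmetic generator output. A one-element `Literal` with a trailing comma denotes exactly the same type, so no annotations change meaning; a quick check:

```python
from typing import Literal, get_args

NewStyle = Literal["agent",]   # generator output after this change
OldStyle = Literal["agent"]    # generator output before this change

# typing normalizes the subscript, so both spellings are the same type.
assert NewStyle == OldStyle
assert get_args(NewStyle) == ("agent",)
```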
diff --git a/src/mistralai/models/batchjobsout.py b/src/mistralai/models/batchjobsout.py index 8ce26f31..a1eba5db 100644 --- a/src/mistralai/models/batchjobsout.py +++ b/src/mistralai/models/batchjobsout.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -BatchJobsOutObject = Literal["list"] +BatchJobsOutObject = Literal["list",] class BatchJobsOutTypedDict(TypedDict): diff --git a/src/mistralai/models/chatcompletionchoice.py b/src/mistralai/models/chatcompletionchoice.py index f4f37fb4..b26ce1ec 100644 --- a/src/mistralai/models/chatcompletionchoice.py +++ b/src/mistralai/models/chatcompletionchoice.py @@ -10,7 +10,14 @@ FinishReason = Union[ - Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr + Literal[ + "stop", + "length", + "model_length", + "error", + "tool_calls", + ], + UnrecognizedStr, ] diff --git a/src/mistralai/models/classifierdetailedjobout.py b/src/mistralai/models/classifierdetailedjobout.py index da5bd281..701aee6e 100644 --- a/src/mistralai/models/classifierdetailedjobout.py +++ b/src/mistralai/models/classifierdetailedjobout.py @@ -29,7 +29,9 @@ "CANCELLATION_REQUESTED", ] -ClassifierDetailedJobOutObject = Literal["job"] + +ClassifierDetailedJobOutObject = Literal["job",] + ClassifierDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict @@ -37,7 +39,7 @@ ClassifierDetailedJobOutIntegrations = WandbIntegrationOut -ClassifierDetailedJobOutJobType = Literal["classifier"] +ClassifierDetailedJobOutJobType = Literal["classifier",] class ClassifierDetailedJobOutTypedDict(TypedDict): diff --git a/src/mistralai/models/classifierftmodelout.py b/src/mistralai/models/classifierftmodelout.py index 56ffe96d..d2a31fae 100644 --- a/src/mistralai/models/classifierftmodelout.py +++ b/src/mistralai/models/classifierftmodelout.py @@ -12,9 +12,10 @@ from typing_extensions import NotRequired, TypedDict -ClassifierFTModelOutObject = Literal["model"] +ClassifierFTModelOutObject = Literal["model",] -ClassifierFTModelOutModelType = Literal["classifier"] + +ClassifierFTModelOutModelType = Literal["classifier",] class ClassifierFTModelOutTypedDict(TypedDict): diff --git a/src/mistralai/models/classifierjobout.py b/src/mistralai/models/classifierjobout.py index c8df6da3..a2f7cc08 100644 --- a/src/mistralai/models/classifierjobout.py +++ b/src/mistralai/models/classifierjobout.py @@ -27,16 +27,18 @@ ] r"""The current status of the fine-tuning job.""" -ClassifierJobOutObject = Literal["job"] + +ClassifierJobOutObject = Literal["job",] r"""The object type of the fine-tuning job.""" + ClassifierJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict ClassifierJobOutIntegrations = WandbIntegrationOut -ClassifierJobOutJobType = Literal["classifier"] +ClassifierJobOutJobType = Literal["classifier",] r"""The type of job (`FT` for fine-tuning).""" diff --git a/src/mistralai/models/codeinterpretertool.py b/src/mistralai/models/codeinterpretertool.py index b0fc4d20..48b74ee8 100644 --- a/src/mistralai/models/codeinterpretertool.py +++ b/src/mistralai/models/codeinterpretertool.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -CodeInterpreterToolType = Literal["code_interpreter"] +CodeInterpreterToolType = Literal["code_interpreter",] class CodeInterpreterToolTypedDict(TypedDict): diff --git a/src/mistralai/models/completiondetailedjobout.py b/src/mistralai/models/completiondetailedjobout.py index 8fb1b62a..df41bc2a 100644 --- a/src/mistralai/models/completiondetailedjobout.py +++ 
b/src/mistralai/models/completiondetailedjobout.py @@ -29,7 +29,9 @@ "CANCELLATION_REQUESTED", ] -CompletionDetailedJobOutObject = Literal["job"] + +CompletionDetailedJobOutObject = Literal["job",] + CompletionDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict @@ -37,7 +39,8 @@ CompletionDetailedJobOutIntegrations = WandbIntegrationOut -CompletionDetailedJobOutJobType = Literal["completion"] +CompletionDetailedJobOutJobType = Literal["completion",] + CompletionDetailedJobOutRepositoriesTypedDict = GithubRepositoryOutTypedDict diff --git a/src/mistralai/models/completionftmodelout.py b/src/mistralai/models/completionftmodelout.py index ab71168b..7b6520de 100644 --- a/src/mistralai/models/completionftmodelout.py +++ b/src/mistralai/models/completionftmodelout.py @@ -11,9 +11,10 @@ from typing_extensions import NotRequired, TypedDict -CompletionFTModelOutObject = Literal["model"] +CompletionFTModelOutObject = Literal["model",] -ModelType = Literal["completion"] + +ModelType = Literal["completion",] class CompletionFTModelOutTypedDict(TypedDict): diff --git a/src/mistralai/models/completionjobout.py b/src/mistralai/models/completionjobout.py index bed67b50..70995d2a 100644 --- a/src/mistralai/models/completionjobout.py +++ b/src/mistralai/models/completionjobout.py @@ -28,18 +28,21 @@ ] r"""The current status of the fine-tuning job.""" -CompletionJobOutObject = Literal["job"] + +CompletionJobOutObject = Literal["job",] r"""The object type of the fine-tuning job.""" + IntegrationsTypedDict = WandbIntegrationOutTypedDict Integrations = WandbIntegrationOut -JobType = Literal["completion"] +JobType = Literal["completion",] r"""The type of job (`FT` for fine-tuning).""" + RepositoriesTypedDict = GithubRepositoryOutTypedDict diff --git a/src/mistralai/models/completionresponsestreamchoice.py b/src/mistralai/models/completionresponsestreamchoice.py index 2426148c..c9657515 100644 --- a/src/mistralai/models/completionresponsestreamchoice.py +++ b/src/mistralai/models/completionresponsestreamchoice.py @@ -11,7 +11,13 @@ CompletionResponseStreamChoiceFinishReason = Union[ - Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr + Literal[ + "stop", + "length", + "error", + "tool_calls", + ], + UnrecognizedStr, ] diff --git a/src/mistralai/models/conversationappendrequest.py b/src/mistralai/models/conversationappendrequest.py index ecc47e45..15cbc687 100644 --- a/src/mistralai/models/conversationappendrequest.py +++ b/src/mistralai/models/conversationappendrequest.py @@ -8,7 +8,10 @@ from typing_extensions import NotRequired, TypedDict -ConversationAppendRequestHandoffExecution = Literal["client", "server"] +ConversationAppendRequestHandoffExecution = Literal[ + "client", + "server", +] class ConversationAppendRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/conversationappendstreamrequest.py b/src/mistralai/models/conversationappendstreamrequest.py index 25ffe5fb..8cecf89d 100644 --- a/src/mistralai/models/conversationappendstreamrequest.py +++ b/src/mistralai/models/conversationappendstreamrequest.py @@ -8,7 +8,10 @@ from typing_extensions import NotRequired, TypedDict -ConversationAppendStreamRequestHandoffExecution = Literal["client", "server"] +ConversationAppendStreamRequestHandoffExecution = Literal[ + "client", + "server", +] class ConversationAppendStreamRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/conversationhistory.py b/src/mistralai/models/conversationhistory.py index 472915fe..d5206a57 100644 --- 
a/src/mistralai/models/conversationhistory.py +++ b/src/mistralai/models/conversationhistory.py @@ -12,7 +12,8 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ConversationHistoryObject = Literal["conversation.history"] +ConversationHistoryObject = Literal["conversation.history",] + EntriesTypedDict = TypeAliasType( "EntriesTypedDict", diff --git a/src/mistralai/models/conversationmessages.py b/src/mistralai/models/conversationmessages.py index 9027045b..32ca9c20 100644 --- a/src/mistralai/models/conversationmessages.py +++ b/src/mistralai/models/conversationmessages.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -ConversationMessagesObject = Literal["conversation.messages"] +ConversationMessagesObject = Literal["conversation.messages",] class ConversationMessagesTypedDict(TypedDict): diff --git a/src/mistralai/models/conversationrequest.py b/src/mistralai/models/conversationrequest.py index bd4368d2..09d934ed 100644 --- a/src/mistralai/models/conversationrequest.py +++ b/src/mistralai/models/conversationrequest.py @@ -16,7 +16,11 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -HandoffExecution = Literal["client", "server"] +HandoffExecution = Literal[ + "client", + "server", +] + ToolsTypedDict = TypeAliasType( "ToolsTypedDict", diff --git a/src/mistralai/models/conversationresponse.py b/src/mistralai/models/conversationresponse.py index 61de8565..ff318e35 100644 --- a/src/mistralai/models/conversationresponse.py +++ b/src/mistralai/models/conversationresponse.py @@ -11,7 +11,8 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ConversationResponseObject = Literal["conversation.response"] +ConversationResponseObject = Literal["conversation.response",] + OutputsTypedDict = TypeAliasType( "OutputsTypedDict", diff --git a/src/mistralai/models/conversationrestartrequest.py b/src/mistralai/models/conversationrestartrequest.py index 091917fe..a9c8410c 100644 --- a/src/mistralai/models/conversationrestartrequest.py +++ b/src/mistralai/models/conversationrestartrequest.py @@ -9,7 +9,10 @@ from typing_extensions import NotRequired, TypedDict -ConversationRestartRequestHandoffExecution = Literal["client", "server"] +ConversationRestartRequestHandoffExecution = Literal[ + "client", + "server", +] class ConversationRestartRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/conversationrestartstreamrequest.py b/src/mistralai/models/conversationrestartstreamrequest.py index 4bcf255a..0703bb5f 100644 --- a/src/mistralai/models/conversationrestartstreamrequest.py +++ b/src/mistralai/models/conversationrestartstreamrequest.py @@ -9,7 +9,10 @@ from typing_extensions import NotRequired, TypedDict -ConversationRestartStreamRequestHandoffExecution = Literal["client", "server"] +ConversationRestartStreamRequestHandoffExecution = Literal[ + "client", + "server", +] class ConversationRestartStreamRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/conversationstreamrequest.py b/src/mistralai/models/conversationstreamrequest.py index 8c6d56c2..6ff56e17 100644 --- a/src/mistralai/models/conversationstreamrequest.py +++ b/src/mistralai/models/conversationstreamrequest.py @@ -16,7 +16,11 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -ConversationStreamRequestHandoffExecution = Literal["client", "server"] +ConversationStreamRequestHandoffExecution = Literal[ + "client", + "server", +] + ConversationStreamRequestToolsTypedDict = TypeAliasType( 
"ConversationStreamRequestToolsTypedDict", diff --git a/src/mistralai/models/documentlibrarytool.py b/src/mistralai/models/documentlibrarytool.py index f36de710..8d4c122b 100644 --- a/src/mistralai/models/documentlibrarytool.py +++ b/src/mistralai/models/documentlibrarytool.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -DocumentLibraryToolType = Literal["document_library"] +DocumentLibraryToolType = Literal["document_library",] class DocumentLibraryToolTypedDict(TypedDict): diff --git a/src/mistralai/models/documenturlchunk.py b/src/mistralai/models/documenturlchunk.py index 33f29ba8..6d0b1dc6 100644 --- a/src/mistralai/models/documenturlchunk.py +++ b/src/mistralai/models/documenturlchunk.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -DocumentURLChunkType = Literal["document_url"] +DocumentURLChunkType = Literal["document_url",] class DocumentURLChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/embeddingdtype.py b/src/mistralai/models/embeddingdtype.py index 4f3c41bd..26eee779 100644 --- a/src/mistralai/models/embeddingdtype.py +++ b/src/mistralai/models/embeddingdtype.py @@ -4,4 +4,10 @@ from typing import Literal -EmbeddingDtype = Literal["float", "int8", "uint8", "binary", "ubinary"] +EmbeddingDtype = Literal[ + "float", + "int8", + "uint8", + "binary", + "ubinary", +] diff --git a/src/mistralai/models/encodingformat.py b/src/mistralai/models/encodingformat.py index 6c28a15a..be6c1a14 100644 --- a/src/mistralai/models/encodingformat.py +++ b/src/mistralai/models/encodingformat.py @@ -4,4 +4,7 @@ from typing import Literal -EncodingFormat = Literal["float", "base64"] +EncodingFormat = Literal[ + "float", + "base64", +] diff --git a/src/mistralai/models/entitytype.py b/src/mistralai/models/entitytype.py index b5149c5f..8d2d4bbe 100644 --- a/src/mistralai/models/entitytype.py +++ b/src/mistralai/models/entitytype.py @@ -5,5 +5,12 @@ from typing import Literal, Union -EntityType = Union[Literal["User", "Workspace", "Org"], UnrecognizedStr] +EntityType = Union[ + Literal[ + "User", + "Workspace", + "Org", + ], + UnrecognizedStr, +] r"""The type of entity, used to share a library.""" diff --git a/src/mistralai/models/filepurpose.py b/src/mistralai/models/filepurpose.py index 8599192b..b109b350 100644 --- a/src/mistralai/models/filepurpose.py +++ b/src/mistralai/models/filepurpose.py @@ -5,4 +5,11 @@ from typing import Literal, Union -FilePurpose = Union[Literal["fine-tune", "batch", "ocr"], UnrecognizedStr] +FilePurpose = Union[ + Literal[ + "fine-tune", + "batch", + "ocr", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/models/finetuneablemodeltype.py b/src/mistralai/models/finetuneablemodeltype.py index 3507dc91..f5b8b2ed 100644 --- a/src/mistralai/models/finetuneablemodeltype.py +++ b/src/mistralai/models/finetuneablemodeltype.py @@ -4,4 +4,7 @@ from typing import Literal -FineTuneableModelType = Literal["completion", "classifier"] +FineTuneableModelType = Literal[ + "completion", + "classifier", +] diff --git a/src/mistralai/models/ftclassifierlossfunction.py b/src/mistralai/models/ftclassifierlossfunction.py index df2d19ff..c4ef66e0 100644 --- a/src/mistralai/models/ftclassifierlossfunction.py +++ b/src/mistralai/models/ftclassifierlossfunction.py @@ -4,4 +4,7 @@ from typing import Literal -FTClassifierLossFunction = Literal["single_class", "multi_class"] +FTClassifierLossFunction = Literal[ + "single_class", + "multi_class", +] diff --git a/src/mistralai/models/ftmodelcard.py b/src/mistralai/models/ftmodelcard.py 
index 7159ce00..1c3bd04d 100644 --- a/src/mistralai/models/ftmodelcard.py +++ b/src/mistralai/models/ftmodelcard.py @@ -12,7 +12,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -FTModelCardType = Literal["fine-tuned"] +FTModelCardType = Literal["fine-tuned",] class FTModelCardTypedDict(TypedDict): diff --git a/src/mistralai/models/functioncallentry.py b/src/mistralai/models/functioncallentry.py index 1e47fda9..4ea62c4f 100644 --- a/src/mistralai/models/functioncallentry.py +++ b/src/mistralai/models/functioncallentry.py @@ -12,9 +12,10 @@ from typing_extensions import NotRequired, TypedDict -FunctionCallEntryObject = Literal["entry"] +FunctionCallEntryObject = Literal["entry",] -FunctionCallEntryType = Literal["function.call"] + +FunctionCallEntryType = Literal["function.call",] class FunctionCallEntryTypedDict(TypedDict): diff --git a/src/mistralai/models/functioncallevent.py b/src/mistralai/models/functioncallevent.py index 90b4b226..e3992cf1 100644 --- a/src/mistralai/models/functioncallevent.py +++ b/src/mistralai/models/functioncallevent.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -FunctionCallEventType = Literal["function.call.delta"] +FunctionCallEventType = Literal["function.call.delta",] class FunctionCallEventTypedDict(TypedDict): diff --git a/src/mistralai/models/functionresultentry.py b/src/mistralai/models/functionresultentry.py index f09e11ae..1c61395a 100644 --- a/src/mistralai/models/functionresultentry.py +++ b/src/mistralai/models/functionresultentry.py @@ -8,9 +8,10 @@ from typing_extensions import NotRequired, TypedDict -FunctionResultEntryObject = Literal["entry"] +FunctionResultEntryObject = Literal["entry",] -FunctionResultEntryType = Literal["function.result"] + +FunctionResultEntryType = Literal["function.result",] class FunctionResultEntryTypedDict(TypedDict): diff --git a/src/mistralai/models/functiontool.py b/src/mistralai/models/functiontool.py index 7ce5c464..009fe280 100644 --- a/src/mistralai/models/functiontool.py +++ b/src/mistralai/models/functiontool.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -FunctionToolType = Literal["function"] +FunctionToolType = Literal["function",] class FunctionToolTypedDict(TypedDict): diff --git a/src/mistralai/models/githubrepositoryin.py b/src/mistralai/models/githubrepositoryin.py index 801c0540..b16ce0d2 100644 --- a/src/mistralai/models/githubrepositoryin.py +++ b/src/mistralai/models/githubrepositoryin.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -GithubRepositoryInType = Literal["github"] +GithubRepositoryInType = Literal["github",] class GithubRepositoryInTypedDict(TypedDict): diff --git a/src/mistralai/models/githubrepositoryout.py b/src/mistralai/models/githubrepositoryout.py index 0d74c17a..372477c1 100644 --- a/src/mistralai/models/githubrepositoryout.py +++ b/src/mistralai/models/githubrepositoryout.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -GithubRepositoryOutType = Literal["github"] +GithubRepositoryOutType = Literal["github",] class GithubRepositoryOutTypedDict(TypedDict): diff --git a/src/mistralai/models/httpvalidationerror.py b/src/mistralai/models/httpvalidationerror.py index e9136063..d467577a 100644 --- a/src/mistralai/models/httpvalidationerror.py +++ b/src/mistralai/models/httpvalidationerror.py @@ -2,6 +2,7 @@ from __future__ import annotations from .validationerror import ValidationError +from dataclasses import dataclass, field import httpx from mistralai.models import 
MistralError from mistralai.types import BaseModel @@ -12,8 +13,9 @@ class HTTPValidationErrorData(BaseModel): detail: Optional[List[ValidationError]] = None +@dataclass(unsafe_hash=True) class HTTPValidationError(MistralError): - data: HTTPValidationErrorData + data: HTTPValidationErrorData = field(hash=False) def __init__( self, @@ -23,4 +25,4 @@ def __init__( ): message = body or raw_response.text super().__init__(message, raw_response, body) - self.data = data + object.__setattr__(self, "data", data) diff --git a/src/mistralai/models/imagegenerationtool.py b/src/mistralai/models/imagegenerationtool.py index 27bb2d12..a92335db 100644 --- a/src/mistralai/models/imagegenerationtool.py +++ b/src/mistralai/models/imagegenerationtool.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -ImageGenerationToolType = Literal["image_generation"] +ImageGenerationToolType = Literal["image_generation",] class ImageGenerationToolTypedDict(TypedDict): diff --git a/src/mistralai/models/imageurlchunk.py b/src/mistralai/models/imageurlchunk.py index 498690f5..8e8aac42 100644 --- a/src/mistralai/models/imageurlchunk.py +++ b/src/mistralai/models/imageurlchunk.py @@ -15,7 +15,7 @@ ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) -ImageURLChunkType = Literal["image_url"] +ImageURLChunkType = Literal["image_url",] class ImageURLChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/jobsout.py b/src/mistralai/models/jobsout.py index abdf18fd..680b1d58 100644 --- a/src/mistralai/models/jobsout.py +++ b/src/mistralai/models/jobsout.py @@ -24,7 +24,7 @@ ] -JobsOutObject = Literal["list"] +JobsOutObject = Literal["list",] class JobsOutTypedDict(TypedDict): diff --git a/src/mistralai/models/legacyjobmetadataout.py b/src/mistralai/models/legacyjobmetadataout.py index d878173b..49951219 100644 --- a/src/mistralai/models/legacyjobmetadataout.py +++ b/src/mistralai/models/legacyjobmetadataout.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -LegacyJobMetadataOutObject = Literal["job.metadata"] +LegacyJobMetadataOutObject = Literal["job.metadata",] class LegacyJobMetadataOutTypedDict(TypedDict): diff --git a/src/mistralai/models/messageinputentry.py b/src/mistralai/models/messageinputentry.py index c14ad5ae..edf05631 100644 --- a/src/mistralai/models/messageinputentry.py +++ b/src/mistralai/models/messageinputentry.py @@ -12,11 +12,17 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -Object = Literal["entry"] +Object = Literal["entry",] -MessageInputEntryType = Literal["message.input"] -MessageInputEntryRole = Literal["assistant", "user"] +MessageInputEntryType = Literal["message.input",] + + +MessageInputEntryRole = Literal[ + "assistant", + "user", +] + MessageInputEntryContentTypedDict = TypeAliasType( "MessageInputEntryContentTypedDict", diff --git a/src/mistralai/models/messageoutputentry.py b/src/mistralai/models/messageoutputentry.py index 1c2e4107..0e2df81e 100644 --- a/src/mistralai/models/messageoutputentry.py +++ b/src/mistralai/models/messageoutputentry.py @@ -12,11 +12,14 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -MessageOutputEntryObject = Literal["entry"] +MessageOutputEntryObject = Literal["entry",] -MessageOutputEntryType = Literal["message.output"] -MessageOutputEntryRole = Literal["assistant"] +MessageOutputEntryType = Literal["message.output",] + + +MessageOutputEntryRole = Literal["assistant",] + MessageOutputEntryContentTypedDict = TypeAliasType( 
"MessageOutputEntryContentTypedDict", diff --git a/src/mistralai/models/messageoutputevent.py b/src/mistralai/models/messageoutputevent.py index 474cb081..751767a3 100644 --- a/src/mistralai/models/messageoutputevent.py +++ b/src/mistralai/models/messageoutputevent.py @@ -9,9 +9,11 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -MessageOutputEventType = Literal["message.output.delta"] +MessageOutputEventType = Literal["message.output.delta",] + + +MessageOutputEventRole = Literal["assistant",] -MessageOutputEventRole = Literal["assistant"] MessageOutputEventContentTypedDict = TypeAliasType( "MessageOutputEventContentTypedDict", Union[str, OutputContentChunksTypedDict] diff --git a/src/mistralai/models/mistralerror.py b/src/mistralai/models/mistralerror.py index a0ee5078..28cfd22d 100644 --- a/src/mistralai/models/mistralerror.py +++ b/src/mistralai/models/mistralerror.py @@ -2,25 +2,29 @@ import httpx from typing import Optional +from dataclasses import dataclass, field +@dataclass(unsafe_hash=True) class MistralError(Exception): """The base class for all HTTP error responses.""" message: str status_code: int body: str - headers: httpx.Headers - raw_response: httpx.Response + headers: httpx.Headers = field(hash=False) + raw_response: httpx.Response = field(hash=False) def __init__( self, message: str, raw_response: httpx.Response, body: Optional[str] = None ): - self.message = message - self.status_code = raw_response.status_code - self.body = body if body is not None else raw_response.text - self.headers = raw_response.headers - self.raw_response = raw_response + object.__setattr__(self, "message", message) + object.__setattr__(self, "status_code", raw_response.status_code) + object.__setattr__( + self, "body", body if body is not None else raw_response.text + ) + object.__setattr__(self, "headers", raw_response.headers) + object.__setattr__(self, "raw_response", raw_response) def __str__(self): return self.message diff --git a/src/mistralai/models/mistralpromptmode.py b/src/mistralai/models/mistralpromptmode.py index 0ffd6787..ee82fb6d 100644 --- a/src/mistralai/models/mistralpromptmode.py +++ b/src/mistralai/models/mistralpromptmode.py @@ -5,4 +5,4 @@ from typing import Literal, Union -MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr] +MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] diff --git a/src/mistralai/models/modelconversation.py b/src/mistralai/models/modelconversation.py index e413b6fb..8eca4f97 100644 --- a/src/mistralai/models/modelconversation.py +++ b/src/mistralai/models/modelconversation.py @@ -42,7 +42,7 @@ ] -ModelConversationObject = Literal["conversation"] +ModelConversationObject = Literal["conversation",] class ModelConversationTypedDict(TypedDict): diff --git a/src/mistralai/models/no_response_error.py b/src/mistralai/models/no_response_error.py index f98beea2..1deab64b 100644 --- a/src/mistralai/models/no_response_error.py +++ b/src/mistralai/models/no_response_error.py @@ -1,12 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +from dataclasses import dataclass + + +@dataclass(unsafe_hash=True) class NoResponseError(Exception): """Error raised when no HTTP response is received from the server.""" message: str def __init__(self, message: str = "No response received"): - self.message = message + object.__setattr__(self, "message", message) super().__init__(message) def __str__(self): diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py index e600d5b6..8bd13370 100644 --- a/src/mistralai/models/ocrrequest.py +++ b/src/mistralai/models/ocrrequest.py @@ -22,7 +22,10 @@ r"""Document to run OCR on""" -TableFormat = Literal["markdown", "html"] +TableFormat = Literal[ + "markdown", + "html", +] class OCRRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/ocrtableobject.py b/src/mistralai/models/ocrtableobject.py index 76f21f3b..5f30ab5e 100644 --- a/src/mistralai/models/ocrtableobject.py +++ b/src/mistralai/models/ocrtableobject.py @@ -7,7 +7,10 @@ from typing_extensions import Annotated, TypedDict -Format = Literal["markdown", "html"] +Format = Literal[ + "markdown", + "html", +] r"""Format of the table""" diff --git a/src/mistralai/models/referencechunk.py b/src/mistralai/models/referencechunk.py index 4a5503f2..1864ac79 100644 --- a/src/mistralai/models/referencechunk.py +++ b/src/mistralai/models/referencechunk.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -ReferenceChunkType = Literal["reference"] +ReferenceChunkType = Literal["reference",] class ReferenceChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/requestsource.py b/src/mistralai/models/requestsource.py index 5ab93af0..7b0a35c4 100644 --- a/src/mistralai/models/requestsource.py +++ b/src/mistralai/models/requestsource.py @@ -4,4 +4,8 @@ from typing import Literal -RequestSource = Literal["api", "playground", "agent_builder_v1"] +RequestSource = Literal[ + "api", + "playground", + "agent_builder_v1", +] diff --git a/src/mistralai/models/responsedoneevent.py b/src/mistralai/models/responsedoneevent.py index 296cb430..5a3a3dfb 100644 --- a/src/mistralai/models/responsedoneevent.py +++ b/src/mistralai/models/responsedoneevent.py @@ -8,7 +8,7 @@ from typing_extensions import NotRequired, TypedDict -ResponseDoneEventType = Literal["conversation.response.done"] +ResponseDoneEventType = Literal["conversation.response.done",] class ResponseDoneEventTypedDict(TypedDict): diff --git a/src/mistralai/models/responseerrorevent.py b/src/mistralai/models/responseerrorevent.py index e4190d17..6cb1b268 100644 --- a/src/mistralai/models/responseerrorevent.py +++ b/src/mistralai/models/responseerrorevent.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -ResponseErrorEventType = Literal["conversation.response.error"] +ResponseErrorEventType = Literal["conversation.response.error",] class ResponseErrorEventTypedDict(TypedDict): diff --git a/src/mistralai/models/responseformats.py b/src/mistralai/models/responseformats.py index 258fe70e..cbf83ce7 100644 --- a/src/mistralai/models/responseformats.py +++ b/src/mistralai/models/responseformats.py @@ -4,4 +4,8 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object", "json_schema"] +ResponseFormats = Literal[ + "text", + "json_object", + "json_schema", +] diff --git a/src/mistralai/models/responsestartedevent.py b/src/mistralai/models/responsestartedevent.py index 6acb483e..d14d45ef 100644 --- a/src/mistralai/models/responsestartedevent.py +++ b/src/mistralai/models/responsestartedevent.py @@ -7,7 
+7,7 @@ from typing_extensions import NotRequired, TypedDict -ResponseStartedEventType = Literal["conversation.response.started"] +ResponseStartedEventType = Literal["conversation.response.started",] class ResponseStartedEventTypedDict(TypedDict): diff --git a/src/mistralai/models/responsevalidationerror.py b/src/mistralai/models/responsevalidationerror.py index fe31cfbd..ed301655 100644 --- a/src/mistralai/models/responsevalidationerror.py +++ b/src/mistralai/models/responsevalidationerror.py @@ -2,10 +2,12 @@ import httpx from typing import Optional +from dataclasses import dataclass from mistralai.models import MistralError +@dataclass(unsafe_hash=True) class ResponseValidationError(MistralError): """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" diff --git a/src/mistralai/models/sampletype.py b/src/mistralai/models/sampletype.py index adc90ec7..efb43e9b 100644 --- a/src/mistralai/models/sampletype.py +++ b/src/mistralai/models/sampletype.py @@ -6,6 +6,12 @@ SampleType = Union[ - Literal["pretrain", "instruct", "batch_request", "batch_result", "batch_error"], + Literal[ + "pretrain", + "instruct", + "batch_request", + "batch_result", + "batch_error", + ], UnrecognizedStr, ] diff --git a/src/mistralai/models/sdkerror.py b/src/mistralai/models/sdkerror.py index 2513f36b..65c45cf1 100644 --- a/src/mistralai/models/sdkerror.py +++ b/src/mistralai/models/sdkerror.py @@ -2,12 +2,14 @@ import httpx from typing import Optional +from dataclasses import dataclass from mistralai.models import MistralError MAX_MESSAGE_LEN = 10_000 +@dataclass(unsafe_hash=True) class SDKError(MistralError): """The fallback error class if no more specific error class is matched.""" diff --git a/src/mistralai/models/shareenum.py b/src/mistralai/models/shareenum.py index c2945514..634ba4b7 100644 --- a/src/mistralai/models/shareenum.py +++ b/src/mistralai/models/shareenum.py @@ -5,4 +5,10 @@ from typing import Literal, Union -ShareEnum = Union[Literal["Viewer", "Editor"], UnrecognizedStr] +ShareEnum = Union[ + Literal[ + "Viewer", + "Editor", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/models/source.py b/src/mistralai/models/source.py index c21550f2..cc3abce2 100644 --- a/src/mistralai/models/source.py +++ b/src/mistralai/models/source.py @@ -5,4 +5,11 @@ from typing import Literal, Union -Source = Union[Literal["upload", "repository", "mistral"], UnrecognizedStr] +Source = Union[ + Literal[ + "upload", + "repository", + "mistral", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/models/systemmessage.py b/src/mistralai/models/systemmessage.py index 25b51f95..2b34607b 100644 --- a/src/mistralai/models/systemmessage.py +++ b/src/mistralai/models/systemmessage.py @@ -21,7 +21,7 @@ ) -Role = Literal["system"] +Role = Literal["system",] class SystemMessageTypedDict(TypedDict): diff --git a/src/mistralai/models/textchunk.py b/src/mistralai/models/textchunk.py index 02b115f6..6052686e 100644 --- a/src/mistralai/models/textchunk.py +++ b/src/mistralai/models/textchunk.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -TextChunkType = Literal["text"] +TextChunkType = Literal["text",] class TextChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/thinkchunk.py b/src/mistralai/models/thinkchunk.py index 24b466f9..627ae488 100644 --- a/src/mistralai/models/thinkchunk.py +++ b/src/mistralai/models/thinkchunk.py @@ -16,7 +16,7 @@ Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) -ThinkChunkType = 
Literal["thinking"] +ThinkChunkType = Literal["thinking",] class ThinkChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/timestampgranularity.py b/src/mistralai/models/timestampgranularity.py index dd1b6446..02816df6 100644 --- a/src/mistralai/models/timestampgranularity.py +++ b/src/mistralai/models/timestampgranularity.py @@ -4,4 +4,4 @@ from typing import Literal -TimestampGranularity = Literal["segment"] +TimestampGranularity = Literal["segment",] diff --git a/src/mistralai/models/toolchoiceenum.py b/src/mistralai/models/toolchoiceenum.py index 8e6a6ad8..01f6f677 100644 --- a/src/mistralai/models/toolchoiceenum.py +++ b/src/mistralai/models/toolchoiceenum.py @@ -4,4 +4,9 @@ from typing import Literal -ToolChoiceEnum = Literal["auto", "none", "any", "required"] +ToolChoiceEnum = Literal[ + "auto", + "none", + "any", + "required", +] diff --git a/src/mistralai/models/toolexecutiondeltaevent.py b/src/mistralai/models/toolexecutiondeltaevent.py index 25438206..4fca46a8 100644 --- a/src/mistralai/models/toolexecutiondeltaevent.py +++ b/src/mistralai/models/toolexecutiondeltaevent.py @@ -8,7 +8,8 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ToolExecutionDeltaEventType = Literal["tool.execution.delta"] +ToolExecutionDeltaEventType = Literal["tool.execution.delta",] + ToolExecutionDeltaEventNameTypedDict = TypeAliasType( "ToolExecutionDeltaEventNameTypedDict", Union[BuiltInConnectors, str] diff --git a/src/mistralai/models/toolexecutiondoneevent.py b/src/mistralai/models/toolexecutiondoneevent.py index 2dea3324..621d5571 100644 --- a/src/mistralai/models/toolexecutiondoneevent.py +++ b/src/mistralai/models/toolexecutiondoneevent.py @@ -8,7 +8,8 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ToolExecutionDoneEventType = Literal["tool.execution.done"] +ToolExecutionDoneEventType = Literal["tool.execution.done",] + ToolExecutionDoneEventNameTypedDict = TypeAliasType( "ToolExecutionDoneEventNameTypedDict", Union[BuiltInConnectors, str] diff --git a/src/mistralai/models/toolexecutionentry.py b/src/mistralai/models/toolexecutionentry.py index abe53e06..9f70a63b 100644 --- a/src/mistralai/models/toolexecutionentry.py +++ b/src/mistralai/models/toolexecutionentry.py @@ -9,9 +9,11 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ToolExecutionEntryObject = Literal["entry"] +ToolExecutionEntryObject = Literal["entry",] + + +ToolExecutionEntryType = Literal["tool.execution",] -ToolExecutionEntryType = Literal["tool.execution"] NameTypedDict = TypeAliasType("NameTypedDict", Union[BuiltInConnectors, str]) diff --git a/src/mistralai/models/toolexecutionstartedevent.py b/src/mistralai/models/toolexecutionstartedevent.py index cf4ecbfc..80dd5e97 100644 --- a/src/mistralai/models/toolexecutionstartedevent.py +++ b/src/mistralai/models/toolexecutionstartedevent.py @@ -8,7 +8,8 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ToolExecutionStartedEventType = Literal["tool.execution.started"] +ToolExecutionStartedEventType = Literal["tool.execution.started",] + ToolExecutionStartedEventNameTypedDict = TypeAliasType( "ToolExecutionStartedEventNameTypedDict", Union[BuiltInConnectors, str] diff --git a/src/mistralai/models/toolfilechunk.py b/src/mistralai/models/toolfilechunk.py index 77c07d6d..d3e9dcd4 100644 --- a/src/mistralai/models/toolfilechunk.py +++ b/src/mistralai/models/toolfilechunk.py @@ -8,7 +8,7 @@ from typing_extensions import NotRequired, TypedDict -ToolFileChunkType = Literal["tool_file"] 
+ToolFileChunkType = Literal["tool_file",] class ToolFileChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/toolmessage.py b/src/mistralai/models/toolmessage.py index 82f62e0f..ef917c43 100644 --- a/src/mistralai/models/toolmessage.py +++ b/src/mistralai/models/toolmessage.py @@ -16,7 +16,7 @@ ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) -ToolMessageRole = Literal["tool"] +ToolMessageRole = Literal["tool",] class ToolMessageTypedDict(TypedDict): diff --git a/src/mistralai/models/toolreferencechunk.py b/src/mistralai/models/toolreferencechunk.py index e50b8451..48035484 100644 --- a/src/mistralai/models/toolreferencechunk.py +++ b/src/mistralai/models/toolreferencechunk.py @@ -8,7 +8,7 @@ from typing_extensions import NotRequired, TypedDict -ToolReferenceChunkType = Literal["tool_reference"] +ToolReferenceChunkType = Literal["tool_reference",] class ToolReferenceChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/tooltypes.py b/src/mistralai/models/tooltypes.py index fb581820..f54893c2 100644 --- a/src/mistralai/models/tooltypes.py +++ b/src/mistralai/models/tooltypes.py @@ -5,4 +5,4 @@ from typing import Literal, Union -ToolTypes = Union[Literal["function"], UnrecognizedStr] +ToolTypes = Union[Literal["function",], UnrecognizedStr] diff --git a/src/mistralai/models/transcriptionsegmentchunk.py b/src/mistralai/models/transcriptionsegmentchunk.py index 53f1b397..aa30f053 100644 --- a/src/mistralai/models/transcriptionsegmentchunk.py +++ b/src/mistralai/models/transcriptionsegmentchunk.py @@ -8,7 +8,7 @@ from typing_extensions import NotRequired, TypedDict -Type = Literal["transcription_segment"] +Type = Literal["transcription_segment",] class TranscriptionSegmentChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/transcriptionstreamdone.py b/src/mistralai/models/transcriptionstreamdone.py index ffd0e080..e1b1ab3d 100644 --- a/src/mistralai/models/transcriptionstreamdone.py +++ b/src/mistralai/models/transcriptionstreamdone.py @@ -13,7 +13,7 @@ from typing_extensions import NotRequired, TypedDict -TranscriptionStreamDoneType = Literal["transcription.done"] +TranscriptionStreamDoneType = Literal["transcription.done",] class TranscriptionStreamDoneTypedDict(TypedDict): diff --git a/src/mistralai/models/transcriptionstreamlanguage.py b/src/mistralai/models/transcriptionstreamlanguage.py index 8fc2aa6e..15b75144 100644 --- a/src/mistralai/models/transcriptionstreamlanguage.py +++ b/src/mistralai/models/transcriptionstreamlanguage.py @@ -8,7 +8,7 @@ from typing_extensions import NotRequired, TypedDict -TranscriptionStreamLanguageType = Literal["transcription.language"] +TranscriptionStreamLanguageType = Literal["transcription.language",] class TranscriptionStreamLanguageTypedDict(TypedDict): diff --git a/src/mistralai/models/transcriptionstreamsegmentdelta.py b/src/mistralai/models/transcriptionstreamsegmentdelta.py index 61b396b4..d779ed83 100644 --- a/src/mistralai/models/transcriptionstreamsegmentdelta.py +++ b/src/mistralai/models/transcriptionstreamsegmentdelta.py @@ -8,7 +8,7 @@ from typing_extensions import NotRequired, TypedDict -TranscriptionStreamSegmentDeltaType = Literal["transcription.segment"] +TranscriptionStreamSegmentDeltaType = Literal["transcription.segment",] class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): diff --git a/src/mistralai/models/transcriptionstreamtextdelta.py b/src/mistralai/models/transcriptionstreamtextdelta.py index 8f0b0e59..daee151f 100644 --- 
a/src/mistralai/models/transcriptionstreamtextdelta.py +++ b/src/mistralai/models/transcriptionstreamtextdelta.py @@ -8,7 +8,7 @@ from typing_extensions import NotRequired, TypedDict -TranscriptionStreamTextDeltaType = Literal["transcription.text.delta"] +TranscriptionStreamTextDeltaType = Literal["transcription.text.delta",] class TranscriptionStreamTextDeltaTypedDict(TypedDict): diff --git a/src/mistralai/models/unarchiveftmodelout.py b/src/mistralai/models/unarchiveftmodelout.py index 6b2f730d..55c0ea8a 100644 --- a/src/mistralai/models/unarchiveftmodelout.py +++ b/src/mistralai/models/unarchiveftmodelout.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -UnarchiveFTModelOutObject = Literal["model"] +UnarchiveFTModelOutObject = Literal["model",] class UnarchiveFTModelOutTypedDict(TypedDict): diff --git a/src/mistralai/models/usermessage.py b/src/mistralai/models/usermessage.py index 049bc755..61590bed 100644 --- a/src/mistralai/models/usermessage.py +++ b/src/mistralai/models/usermessage.py @@ -16,7 +16,7 @@ UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) -UserMessageRole = Literal["user"] +UserMessageRole = Literal["user",] class UserMessageTypedDict(TypedDict): diff --git a/src/mistralai/models/wandbintegration.py b/src/mistralai/models/wandbintegration.py index 0789b648..69053896 100644 --- a/src/mistralai/models/wandbintegration.py +++ b/src/mistralai/models/wandbintegration.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -WandbIntegrationType = Literal["wandb"] +WandbIntegrationType = Literal["wandb",] class WandbIntegrationTypedDict(TypedDict): diff --git a/src/mistralai/models/wandbintegrationout.py b/src/mistralai/models/wandbintegrationout.py index a1c2f570..f5a9ba80 100644 --- a/src/mistralai/models/wandbintegrationout.py +++ b/src/mistralai/models/wandbintegrationout.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -WandbIntegrationOutType = Literal["wandb"] +WandbIntegrationOutType = Literal["wandb",] class WandbIntegrationOutTypedDict(TypedDict): diff --git a/src/mistralai/models/websearchpremiumtool.py b/src/mistralai/models/websearchpremiumtool.py index 70fc5626..3bbe753a 100644 --- a/src/mistralai/models/websearchpremiumtool.py +++ b/src/mistralai/models/websearchpremiumtool.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -WebSearchPremiumToolType = Literal["web_search_premium"] +WebSearchPremiumToolType = Literal["web_search_premium",] class WebSearchPremiumToolTypedDict(TypedDict): diff --git a/src/mistralai/models/websearchtool.py b/src/mistralai/models/websearchtool.py index 3dfd1c53..eeafecb4 100644 --- a/src/mistralai/models/websearchtool.py +++ b/src/mistralai/models/websearchtool.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -WebSearchToolType = Literal["web_search"] +WebSearchToolType = Literal["web_search",] class WebSearchToolTypedDict(TypedDict): diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index bf82cc16..d44930a0 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -51,6 +51,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -67,7 +68,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="list_models_v1_models_get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( 
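# Every generated operation in models_.py (and the other modules below) now
# threads allow_empty_value=None into request building, and oauth2_scopes
# switches from [] to None. The consumer of allow_empty_value is the
# queryparams.py change later in this diff; a standalone sketch of that
# serialization rule, using illustrative names rather than the SDK's helpers:

from typing import Dict, List, Optional

def build_query(
    raw: Dict[str, Optional[str]],
    allow_empty_value: Optional[List[str]] = None,
) -> Dict[str, List[str]]:
    allow_empty = set(allow_empty_value or [])
    params: Dict[str, List[str]] = {}
    for name, value in raw.items():
        if value is None or value == "":
            if name in allow_empty:
                params[name] = [""]  # serialize as `?name=` instead of dropping
            continue
        params[name] = [value]
    return params

assert build_query({"page": ""}, allow_empty_value=["page"]) == {"page": [""]}
assert build_query({"page": ""}) == {}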
self.sdk_configuration.security, models.Security ), @@ -127,6 +128,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -143,7 +145,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="list_models_v1_models_get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -210,6 +212,7 @@ def retrieve( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -226,7 +229,7 @@ def retrieve( config=self.sdk_configuration, base_url=base_url or "", operation_id="retrieve_model_v1_models__model_id__get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -302,6 +305,7 @@ async def retrieve_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -318,7 +322,7 @@ async def retrieve_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="retrieve_model_v1_models__model_id__get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -394,6 +398,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -410,7 +415,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="delete_model_v1_models__model_id__delete", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -483,6 +488,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -499,7 +505,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="delete_model_v1_models__model_id__delete", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -583,6 +589,7 @@ def update( get_serialized_body=lambda: utils.serialize_request_body( request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -599,7 +606,7 @@ def update( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -679,6 +686,7 @@ async def update_async( get_serialized_body=lambda: utils.serialize_request_body( request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -695,7 +703,7 @@ async def update_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -764,6 +772,7 @@ def archive( 
accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -780,7 +789,7 @@ def archive( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -847,6 +856,7 @@ async def archive_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -863,7 +873,7 @@ async def archive_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -930,6 +940,7 @@ def unarchive( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -946,7 +957,7 @@ def unarchive( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1013,6 +1024,7 @@ async def unarchive_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1029,7 +1041,7 @@ async def unarchive_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py index 6b283b35..a775511f 100644 --- a/src/mistralai/ocr.py +++ b/src/mistralai/ocr.py @@ -100,6 +100,7 @@ def process( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.OCRRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -116,7 +117,7 @@ def process( config=self.sdk_configuration, base_url=base_url or "", operation_id="ocr_v1_ocr_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -231,6 +232,7 @@ async def process_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.OCRRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -247,7 +249,7 @@ async def process_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="ocr_v1_ocr_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index 311147fd..c83b53e0 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -92,7 +92,7 @@ def __init__( """ client_supplied = True if client is None: - client = httpx.Client() + client = httpx.Client(follow_redirects=True) client_supplied = False assert issubclass( @@ -101,7 +101,7 @@ def __init__( async_client_supplied = True if async_client is None: - async_client = httpx.AsyncClient() + async_client = httpx.AsyncClient(follow_redirects=True) 
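# The default httpx clients above now follow redirects. Anyone who needs the
# previous behavior can keep supplying their own client through the
# constructor parameters shown in this hunk -- a sketch, assuming the usual
# top-level Mistral entry point:

import httpx
from mistralai import Mistral

sdk = Mistral(
    api_key="YOUR_API_KEY",
    client=httpx.Client(follow_redirects=False),  # opt back out per client
)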
async_client_supplied = False if debug_logger is None: diff --git a/src/mistralai/transcriptions.py b/src/mistralai/transcriptions.py index dc8ad2e8..33b0d83b 100644 --- a/src/mistralai/transcriptions.py +++ b/src/mistralai/transcriptions.py @@ -77,6 +77,7 @@ def complete( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", models.AudioTranscriptionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -93,7 +94,7 @@ def complete( config=self.sdk_configuration, base_url=base_url or "", operation_id="audio_api_v1_transcriptions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -179,6 +180,7 @@ async def complete_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", models.AudioTranscriptionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -195,7 +197,7 @@ async def complete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="audio_api_v1_transcriptions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -285,6 +287,7 @@ def stream( "multipart", models.AudioTranscriptionRequestStream, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -301,7 +304,7 @@ def stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="audio_api_v1_transcriptions_post_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -397,6 +400,7 @@ async def stream_async( "multipart", models.AudioTranscriptionRequestStream, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -413,7 +417,7 @@ async def stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="audio_api_v1_transcriptions_post_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/utils/annotations.py b/src/mistralai/utils/annotations.py index 387874ed..12e0aa4f 100644 --- a/src/mistralai/utils/annotations.py +++ b/src/mistralai/utils/annotations.py @@ -3,6 +3,7 @@ from enum import Enum from typing import Any, Optional + def get_discriminator(model: Any, fieldname: str, key: str) -> str: """ Recursively search for the discriminator attribute in a model. 
@@ -25,31 +26,54 @@ def get_field_discriminator(field: Any) -> Optional[str]: if isinstance(field, dict): if key in field: - return f'{field[key]}' + return f"{field[key]}" if hasattr(field, fieldname): attr = getattr(field, fieldname) if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' + return f"{attr.value}" + return f"{attr}" if hasattr(field, upper_fieldname): attr = getattr(field, upper_fieldname) if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' + return f"{attr.value}" + return f"{attr}" return None + def search_nested_discriminator(obj: Any) -> Optional[str]: + """Recursively search for discriminator in nested structures.""" + # First try direct field lookup + discriminator = get_field_discriminator(obj) + if discriminator is not None: + return discriminator + + # If it's a dict, search in nested values + if isinstance(obj, dict): + for value in obj.values(): + if isinstance(value, list): + # Search in list items + for item in value: + nested_discriminator = search_nested_discriminator(item) + if nested_discriminator is not None: + return nested_discriminator + elif isinstance(value, dict): + # Search in nested dict + nested_discriminator = search_nested_discriminator(value) + if nested_discriminator is not None: + return nested_discriminator + + return None if isinstance(model, list): for field in model: - discriminator = get_field_discriminator(field) + discriminator = search_nested_discriminator(field) if discriminator is not None: return discriminator - discriminator = get_field_discriminator(model) + discriminator = search_nested_discriminator(model) if discriminator is not None: return discriminator - raise ValueError(f'Could not find discriminator field {fieldname} in {model}') + raise ValueError(f"Could not find discriminator field {fieldname} in {model}") diff --git a/src/mistralai/utils/forms.py b/src/mistralai/utils/forms.py index e873495f..f961e76b 100644 --- a/src/mistralai/utils/forms.py +++ b/src/mistralai/utils/forms.py @@ -142,16 +142,21 @@ def serialize_multipart_form( if field_metadata.file: if isinstance(val, List): # Handle array of files + array_field_name = f_name + "[]" for file_obj in val: if not _is_set(file_obj): continue - - file_name, content, content_type = _extract_file_properties(file_obj) + + file_name, content, content_type = _extract_file_properties( + file_obj + ) if content_type is not None: - files.append((f_name + "[]", (file_name, content, content_type))) + files.append( + (array_field_name, (file_name, content, content_type)) + ) else: - files.append((f_name + "[]", (file_name, content))) + files.append((array_field_name, (file_name, content))) else: # Handle single file file_name, content, content_type = _extract_file_properties(val) @@ -161,11 +166,16 @@ def serialize_multipart_form( else: files.append((f_name, (file_name, content))) elif field_metadata.json: - files.append((f_name, ( - None, - marshal_json(val, request_field_types[name]), - "application/json", - ))) + files.append( + ( + f_name, + ( + None, + marshal_json(val, request_field_types[name]), + "application/json", + ), + ) + ) else: if isinstance(val, List): values = [] @@ -175,7 +185,8 @@ def serialize_multipart_form( continue values.append(_val_to_string(value)) - form[f_name + "[]"] = values + array_field_name = f_name + "[]" + form[array_field_name] = values else: form[f_name] = _val_to_string(val) return media_type, form, files diff --git a/src/mistralai/utils/queryparams.py b/src/mistralai/utils/queryparams.py index 
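# get_discriminator can now locate a discriminator nested inside dicts and
# lists rather than only on the top-level object. A standalone sketch of the
# same recursive walk (simplified: the generated version also inspects model
# attributes and Enum values via get_field_discriminator):

from typing import Any, Optional

def find_discriminator(obj: Any, key: str) -> Optional[str]:
    if isinstance(obj, dict):
        if key in obj:
            return str(obj[key])
        for value in obj.values():
            items = value if isinstance(value, list) else [value]
            for item in items:
                found = find_discriminator(item, key)
                if found is not None:
                    return found
    return None

payload = {"outputs": [{"type": "message.output", "content": "hi"}]}
assert find_discriminator(payload, "type") == "message.output"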
37a6e7f9..c04e0db8 100644 --- a/src/mistralai/utils/queryparams.py +++ b/src/mistralai/utils/queryparams.py @@ -27,12 +27,13 @@ def get_query_params( query_params: Any, gbls: Optional[Any] = None, + allow_empty_value: Optional[List[str]] = None, ) -> Dict[str, List[str]]: params: Dict[str, List[str]] = {} - globals_already_populated = _populate_query_params(query_params, gbls, params, []) + globals_already_populated = _populate_query_params(query_params, gbls, params, [], allow_empty_value) if _is_set(gbls): - _populate_query_params(gbls, None, params, globals_already_populated) + _populate_query_params(gbls, None, params, globals_already_populated, allow_empty_value) return params @@ -42,6 +43,7 @@ def _populate_query_params( gbls: Any, query_param_values: Dict[str, List[str]], skip_fields: List[str], + allow_empty_value: Optional[List[str]] = None, ) -> List[str]: globals_already_populated: List[str] = [] @@ -69,6 +71,16 @@ def _populate_query_params( globals_already_populated.append(name) f_name = field.alias if field.alias is not None else name + + allow_empty_set = set(allow_empty_value or []) + should_include_empty = f_name in allow_empty_set and ( + value is None or value == [] or value == "" + ) + + if should_include_empty: + query_param_values[f_name] = [""] + continue + serialization = metadata.serialization if serialization is not None: serialized_parms = _get_serialized_params( diff --git a/src/mistralai/utils/retries.py b/src/mistralai/utils/retries.py index 4d608671..88a91b10 100644 --- a/src/mistralai/utils/retries.py +++ b/src/mistralai/utils/retries.py @@ -3,7 +3,9 @@ import asyncio import random import time -from typing import List +from datetime import datetime +from email.utils import parsedate_to_datetime +from typing import List, Optional import httpx @@ -51,9 +53,11 @@ def __init__(self, config: RetryConfig, status_codes: List[str]): class TemporaryError(Exception): response: httpx.Response + retry_after: Optional[int] def __init__(self, response: httpx.Response): self.response = response + self.retry_after = _parse_retry_after_header(response) class PermanentError(Exception): @@ -63,6 +67,62 @@ def __init__(self, inner: Exception): self.inner = inner +def _parse_retry_after_header(response: httpx.Response) -> Optional[int]: + """Parse Retry-After header from response. + + Returns: + Retry interval in milliseconds, or None if header is missing or invalid. + """ + retry_after_header = response.headers.get("retry-after") + if not retry_after_header: + return None + + try: + seconds = float(retry_after_header) + return round(seconds * 1000) + except ValueError: + pass + + try: + retry_date = parsedate_to_datetime(retry_after_header) + delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds() + return round(max(0, delta) * 1000) + except (ValueError, TypeError): + pass + + return None + + +def _get_sleep_interval( + exception: Exception, + initial_interval: int, + max_interval: int, + exponent: float, + retries: int, +) -> float: + """Get sleep interval for retry with exponential backoff. + + Args: + exception: The exception that triggered the retry. + initial_interval: Initial retry interval in milliseconds. + max_interval: Maximum retry interval in milliseconds. + exponent: Base for exponential backoff calculation. + retries: Current retry attempt count. + + Returns: + Sleep interval in seconds. 
+ """ + if ( + isinstance(exception, TemporaryError) + and exception.retry_after is not None + and exception.retry_after > 0 + ): + return exception.retry_after / 1000 + + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + return min(sleep, max_interval / 1000) + + def retry(func, retries: Retries): if retries.config.strategy == "backoff": @@ -183,8 +243,10 @@ def retry_with_backoff( return exception.response raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) time.sleep(sleep) retries += 1 @@ -211,7 +273,9 @@ async def retry_with_backoff_async( return exception.response raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) await asyncio.sleep(sleep) retries += 1 diff --git a/src/mistralai/utils/unmarshal_json_response.py b/src/mistralai/utils/unmarshal_json_response.py index c0ce7e0f..64d0b3a6 100644 --- a/src/mistralai/utils/unmarshal_json_response.py +++ b/src/mistralai/utils/unmarshal_json_response.py @@ -1,12 +1,26 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from typing import Any, Optional +from typing import Any, Optional, Type, TypeVar, overload import httpx from .serializers import unmarshal_json from mistralai import models +T = TypeVar("T") + + +@overload +def unmarshal_json_response( + typ: Type[T], http_res: httpx.Response, body: Optional[str] = None +) -> T: ... + + +@overload +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: ... + def unmarshal_json_response( typ: Any, http_res: httpx.Response, body: Optional[str] = None