Skip to content

Commit ca2e23d

Browse files

Committed: Version 1.4.64
(1 parent: b7e53a5 — commit ca2e23d)

File tree

31 files changed: +232 additions, -214 deletions

31 files changed: +232 additions, -214 deletions

abacusai/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -279,4 +279,4 @@
279279
from .workflow_node_template import WorkflowNodeTemplate
280280

281281

282-
__version__ = "1.4.63"
282+
__version__ = "1.4.64"

abacusai/api_class/model.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -490,6 +490,7 @@ class ChatLLMTrainingConfig(TrainingConfig):
490490
search_score_cutoff (float): Minimum search score to consider a document as a valid search result.
491491
include_bm25_retrieval (bool): Combine BM25 search score with vector search using reciprocal rank fusion.
492492
database_connector_id (str): Database connector ID to use for connecting external database that gives access to structured data to the LLM.
493+
database_connector_ids (List[str]): List of database connector IDs to use for connecting external databases that give access to structured data to the LLM.
493494
database_connector_tables (List[str]): List of tables to use from the database connector for the ChatLLM.
494495
enable_code_execution (bool): Enable python code execution in the ChatLLM. This equips the LLM with a python kernel in which all its code is executed.
495496
enable_response_caching (bool): Enable caching of LLM responses to speed up response times and improve reproducibility.
@@ -527,7 +528,8 @@ class ChatLLMTrainingConfig(TrainingConfig):
527528
data_columns_to_ignore: List[str] = dataclasses.field(default=None)
528529
search_score_cutoff: float = dataclasses.field(default=None)
529530
include_bm25_retrieval: bool = dataclasses.field(default=None)
530-
database_connector_id: str = dataclasses.field(default=None)
531+
database_connector_id: str = dataclasses.field(default=None) # deprecated
532+
database_connector_ids: List[str] = dataclasses.field(default=None)
531533
database_connector_tables: List[str] = dataclasses.field(default=None)
532534
enable_code_execution: bool = dataclasses.field(default=None)
533535
metadata_columns: list = dataclasses.field(default=None, metadata={'deprecated': True})

abacusai/application_connector.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -9,25 +9,27 @@ class ApplicationConnector(AbstractApiClass):
99
client (ApiClient): An authenticated API Client instance
1010
applicationConnectorId (str): The unique ID for the connection.
1111
service (str): The service this connection connects to
12+
serviceName (str): For OAuth services, the specific provider name (e.g., 'spotify')
1213
name (str): A user-friendly name for the service
1314
createdAt (str): When the API key was created
1415
status (str): The status of the Application Connector
1516
auth (dict): Non-secret connection information for this connector
1617
"""
1718

18-
def __init__(self, client, applicationConnectorId=None, service=None, name=None, createdAt=None, status=None, auth=None):
19+
def __init__(self, client, applicationConnectorId=None, service=None, serviceName=None, name=None, createdAt=None, status=None, auth=None):
1920
super().__init__(client, applicationConnectorId)
2021
self.application_connector_id = applicationConnectorId
2122
self.service = service
23+
self.service_name = serviceName
2224
self.name = name
2325
self.created_at = createdAt
2426
self.status = status
2527
self.auth = auth
2628
self.deprecated_keys = {}
2729

2830
def __repr__(self):
29-
repr_dict = {f'application_connector_id': repr(self.application_connector_id), f'service': repr(self.service), f'name': repr(
30-
self.name), f'created_at': repr(self.created_at), f'status': repr(self.status), f'auth': repr(self.auth)}
31+
repr_dict = {f'application_connector_id': repr(self.application_connector_id), f'service': repr(self.service), f'service_name': repr(
32+
self.service_name), f'name': repr(self.name), f'created_at': repr(self.created_at), f'status': repr(self.status), f'auth': repr(self.auth)}
3133
class_name = "ApplicationConnector"
3234
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
3335
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -40,7 +42,7 @@ def to_dict(self):
4042
Returns:
4143
dict: The dict value representation of the class parameters
4244
"""
43-
resp = {'application_connector_id': self.application_connector_id, 'service': self.service,
45+
resp = {'application_connector_id': self.application_connector_id, 'service': self.service, 'service_name': self.service_name,
4446
'name': self.name, 'created_at': self.created_at, 'status': self.status, 'auth': self.auth}
4547
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
4648

abacusai/batch_prediction.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ def __init__(self, client, batchPredictionId=None, createdAt=None, name=None, de
8484
BatchPredictionArgs, globalPredictionArgs)
8585
self.batch_prediction_args = client._build_class(getattr(
8686
api_class, batchPredictionArgsType, BatchPredictionArgs) if batchPredictionArgsType else BatchPredictionArgs, batchPredictionArgs)
87-
self.deprecated_keys = {'explanations', 'global_prediction_args'}
87+
self.deprecated_keys = {'global_prediction_args', 'explanations'}
8888

8989
def __repr__(self):
9090
repr_dict = {f'batch_prediction_id': repr(self.batch_prediction_id), f'created_at': repr(self.created_at), f'name': repr(self.name), f'deployment_id': repr(self.deployment_id), f'file_connector_output_location': repr(self.file_connector_output_location), f'database_connector_id': repr(self.database_connector_id), f'database_output_configuration': repr(self.database_output_configuration), f'file_output_format': repr(self.file_output_format), f'connector_type': repr(self.connector_type), f'legacy_input_location': repr(self.legacy_input_location), f'output_feature_group_id': repr(self.output_feature_group_id), f'feature_group_table_name': repr(self.feature_group_table_name), f'output_feature_group_table_name': repr(self.output_feature_group_table_name), f'summary_feature_group_table_name': repr(self.summary_feature_group_table_name), f'csv_input_prefix': repr(

abacusai/batch_prediction_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@ def __init__(self, client, batchPredictionVersion=None, batchPredictionId=None,
100100
BatchPredictionArgs, globalPredictionArgs)
101101
self.batch_prediction_args = client._build_class(getattr(
102102
api_class, batchPredictionArgsType, BatchPredictionArgs) if batchPredictionArgsType else BatchPredictionArgs, batchPredictionArgs)
103-
self.deprecated_keys = {'explanations', 'global_prediction_args'}
103+
self.deprecated_keys = {'global_prediction_args', 'explanations'}
104104

105105
def __repr__(self):
106106
repr_dict = {f'batch_prediction_version': repr(self.batch_prediction_version), f'batch_prediction_id': repr(self.batch_prediction_id), f'status': repr(self.status), f'drift_monitor_status': repr(self.drift_monitor_status), f'deployment_id': repr(self.deployment_id), f'model_id': repr(self.model_id), f'model_version': repr(self.model_version), f'predictions_started_at': repr(self.predictions_started_at), f'predictions_completed_at': repr(self.predictions_completed_at), f'database_output_error': repr(self.database_output_error), f'total_predictions': repr(self.total_predictions), f'failed_predictions': repr(self.failed_predictions), f'database_connector_id': repr(self.database_connector_id), f'database_output_configuration': repr(self.database_output_configuration), f'file_connector_output_location': repr(self.file_connector_output_location), f'file_output_format': repr(self.file_output_format), f'connector_type': repr(self.connector_type), f'legacy_input_location': repr(self.legacy_input_location), f'error': repr(self.error), f'drift_monitor_error': repr(self.drift_monitor_error), f'monitor_warnings': repr(self.monitor_warnings), f'csv_input_prefix': repr(

abacusai/client.py

Lines changed: 11 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -663,7 +663,7 @@ class BaseApiClient:
663663
client_options (ClientOptions): Optional API client configurations
664664
skip_version_check (bool): If true, will skip checking the server's current API version on initializing the client
665665
"""
666-
client_version = '1.4.63'
666+
client_version = '1.4.64'
667667

668668
def __init__(self, api_key: str = None, server: str = None, client_options: ClientOptions = None, skip_version_check: bool = False, include_tb: bool = False):
669669
self.api_key = api_key
@@ -4652,7 +4652,7 @@ def create_model_version_from_local_files(self, model_id: str, optional_artifact
46524652
ModelUpload: Collection of upload IDs to upload the model artifacts."""
46534653
return self._call_api('createModelVersionFromLocalFiles', 'POST', query_params={}, body={'modelId': model_id, 'optionalArtifacts': optional_artifacts}, parse_type=ModelUpload)
46544654

4655-
def get_streaming_chat_response(self, deployment_token: str, deployment_id: str, messages: list, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, ignore_documents: bool = False, include_search_results: bool = False):
4655+
def get_streaming_chat_response(self, deployment_token: str, deployment_id: str, messages: list, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, ignore_documents: bool = False, include_search_results: bool = False, user_info: dict = None):
46564656
"""Return an asynchronous generator which continues the conversation based on the input messages and search results.
46574657

46584658
Args:
@@ -4667,7 +4667,8 @@ def get_streaming_chat_response(self, deployment_token: str, deployment_id: str,
46674667
search_score_cutoff (float): Cutoff for the document retriever score. Matching search results below this score will be ignored.
46684668
chat_config (dict): A dictionary specifying the query chat config override.
46694669
ignore_documents (bool): If True, will ignore any documents and search results, and only use the messages to generate a response.
4670-
include_search_results (bool): If True, will also return search results, if relevant. """
4670+
include_search_results (bool): If True, will also return search results, if relevant.
4671+
user_info (dict): The information of the user to act on behalf of for user restricted data sources. """
46714672
headers = {'APIKEY': self.api_key}
46724673
body = {
46734674
'deploymentToken': deployment_token,
@@ -4681,15 +4682,16 @@ def get_streaming_chat_response(self, deployment_token: str, deployment_id: str,
46814682
'searchScoreCutoff': search_score_cutoff,
46824683
'chatConfig': chat_config,
46834684
'ignoreDocuments': ignore_documents,
4684-
'includeSearchResults': include_search_results
4685+
'includeSearchResults': include_search_results,
4686+
'userInfo': user_info
46854687
}
46864688
endpoint = self._get_proxy_endpoint(deployment_id, deployment_token)
46874689
if endpoint is None:
46884690
raise Exception(
46894691
'API not supported, Please contact Abacus.ai support')
46904692
return sse_asynchronous_generator(f'{endpoint}/api/getStreamingChatResponse', headers, body)
46914693

4692-
def get_streaming_conversation_response(self, deployment_token: str, deployment_id: str, message: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, ignore_documents: bool = False, include_search_results: bool = False):
4694+
def get_streaming_conversation_response(self, deployment_token: str, deployment_id: str, message: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, ignore_documents: bool = False, include_search_results: bool = False, user_info: dict = None):
46934695
"""Return an asynchronous generator which continues the conversation based on the input messages and search results.
46944696

46954697
Args:
@@ -4706,7 +4708,8 @@ def get_streaming_conversation_response(self, deployment_token: str, deployment_
47064708
search_score_cutoff (float): Cutoff for the document retriever score. Matching search results below this score will be ignored.
47074709
chat_config (dict): A dictionary specifying the query chat config override.
47084710
ignore_documents (bool): If True, will ignore any documents and search results, and only use the messages to generate a response.
4709-
include_search_results (bool): If True, will also return search results, if relevant. """
4711+
include_search_results (bool): If True, will also return search results, if relevant.
4712+
user_info (dict): The information of the user to act on behalf of for user restricted data sources. """
47104713
headers = {'APIKEY': self.api_key}
47114714
body = {
47124715
'deploymentToken': deployment_token,
@@ -4722,7 +4725,8 @@ def get_streaming_conversation_response(self, deployment_token: str, deployment_
47224725
'searchScoreCutoff': search_score_cutoff,
47234726
'chatConfig': chat_config,
47244727
'ignoreDocuments': ignore_documents,
4725-
'includeSearchResults': include_search_results
4728+
'includeSearchResults': include_search_results,
4729+
'userInfo': user_info
47264730
}
47274731
endpoint = self._get_proxy_endpoint(deployment_id, deployment_token)
47284732
if endpoint is None:
@@ -7902,17 +7906,6 @@ def transfer_style(self, deployment_token: str, deployment_id: str, source_image
79027906
deployment_id, deployment_token) if deployment_token else None
79037907
return self._call_api('transferStyle', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, data={}, files={'sourceImage': source_image, 'styleImage': style_image}, streamable_response=True, server_override=prediction_url)
79047908

7905-
def generate_image(self, deployment_token: str, deployment_id: str, query_data: dict) -> io.BytesIO:
7906-
"""Generate an image from text prompt.
7907-
7908-
Args:
7909-
deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model within an application or website.
7910-
deployment_id (str): A unique identifier to a deployment created under the project.
7911-
query_data (dict): Specifies the text prompt. For example, {'prompt': 'a cat'}"""
7912-
prediction_url = self._get_prediction_endpoint(
7913-
deployment_id, deployment_token) if deployment_token else None
7914-
return self._call_api('generateImage', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data}, streamable_response=True, server_override=prediction_url)
7915-
79167909
def execute_agent(self, deployment_token: str, deployment_id: str, arguments: list = None, keyword_arguments: dict = None) -> Dict:
79177910
"""Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.
79187911

abacusai/prediction_client.py

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -672,17 +672,6 @@ def transfer_style(self, deployment_token: str, deployment_id: str, source_image
672672
deployment_id, deployment_token) if deployment_token else None
673673
return self._call_api('transferStyle', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, data={}, files={'sourceImage': source_image, 'styleImage': style_image}, streamable_response=True, server_override=prediction_url)
674674

675-
def generate_image(self, deployment_token: str, deployment_id: str, query_data: dict) -> io.BytesIO:
676-
"""Generate an image from text prompt.
677-
678-
Args:
679-
deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model within an application or website.
680-
deployment_id (str): A unique identifier to a deployment created under the project.
681-
query_data (dict): Specifies the text prompt. For example, {'prompt': 'a cat'}"""
682-
prediction_url = self._get_prediction_endpoint(
683-
deployment_id, deployment_token) if deployment_token else None
684-
return self._call_api('generateImage', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data}, streamable_response=True, server_override=prediction_url)
685-
686675
def get_matrix_agent_schema(self, deployment_token: str, deployment_id: str, query: str, doc_infos: list = None, deployment_conversation_id: str = None, external_session_id: str = None) -> Dict:
687676
"""Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.
688677

abacusai/presentation_export_result.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,15 +8,18 @@ class PresentationExportResult(AbstractApiClass):
88
Args:
99
client (ApiClient): An authenticated API Client instance
1010
filePath (str): The path to the exported presentation
11+
webViewLink (str): The web view link to the exported presentation (if applicable)
1112
"""
1213

13-
def __init__(self, client, filePath=None):
14+
def __init__(self, client, filePath=None, webViewLink=None):
1415
super().__init__(client, None)
1516
self.file_path = filePath
17+
self.web_view_link = webViewLink
1618
self.deprecated_keys = {}
1719

1820
def __repr__(self):
19-
repr_dict = {f'file_path': repr(self.file_path)}
21+
repr_dict = {f'file_path': repr(
22+
self.file_path), f'web_view_link': repr(self.web_view_link)}
2023
class_name = "PresentationExportResult"
2124
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
2225
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -29,5 +32,6 @@ def to_dict(self):
2932
Returns:
3033
dict: The dict value representation of the class parameters
3134
"""
32-
resp = {'file_path': self.file_path}
35+
resp = {'file_path': self.file_path,
36+
'web_view_link': self.web_view_link}
3337
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}

Comments (0)