Skip to content

Commit 05c1216

Browse files
committed
Version 1.4.54
1 parent f7d6ad0 commit 05c1216

File tree

318 files changed

+1513
-313
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

318 files changed

+1513
-313
lines changed

abacusai/__init__.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
from .app_user_group_sign_in_token import AppUserGroupSignInToken
2121
from .application_connector import ApplicationConnector
2222
from .audio_gen_settings import AudioGenSettings
23+
from .audio_url_result import AudioUrlResult
2324
from .batch_prediction import BatchPrediction
2425
from .batch_prediction_version import BatchPredictionVersion
2526
from .batch_prediction_version_logs import BatchPredictionVersionLogs
@@ -272,4 +273,4 @@
272273
from .workflow_node_template import WorkflowNodeTemplate
273274

274275

275-
__version__ = "1.4.53"
276+
__version__ = "1.4.54"

abacusai/api_class/enums.py

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -416,6 +416,8 @@ class ApplicationConnectorType(ApiEnum):
416416
BOX = 'BOX'
417417
SFTPAPPLICATION = 'SFTPAPPLICATION'
418418
OAUTH = 'OAUTH'
419+
SALESFORCE = 'SALESFORCE'
420+
TWITTER = 'TWITTER'
419421

420422
@classmethod
421423
def user_connectors(cls):
@@ -425,9 +427,15 @@ def user_connectors(cls):
425427
cls.GMAILUSER,
426428
cls.SLACK,
427429
cls.JIRA,
428-
cls.ONEDRIVE
430+
cls.ONEDRIVE,
431+
cls.SALESFORCE,
432+
cls.TWITTER
429433
]
430434

435+
@classmethod
436+
def database_connectors(cls):
437+
return [cls.SALESFORCE]
438+
431439

432440
class StreamingConnectorType(ApiEnum):
433441
KAFKA = 'KAFKA'

abacusai/api_class/model.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -450,7 +450,7 @@ def __post_init__(self):
450450
@dataclasses.dataclass
451451
class SystemConnectorTool(ApiClass):
452452
"""
453-
System connector tool
453+
System connector tool used to integrate chatbots with external services.
454454
455455
Args:
456456
value (str): The name of the tool.
@@ -501,6 +501,7 @@ class ChatLLMTrainingConfig(TrainingConfig):
501501
json_response_schema (str): Specifies the JSON schema that the model should adhere to if `response_format` is set to "JSON". This should be a json-formatted string where each field of the expected schema is mapped to a dictionary containing the fields 'type', 'required' and 'description'. For example - '{"sample_field": {"type": "integer", "required": true, "description": "Sample Field"}}'
502502
mask_pii (bool): Mask PII in the prompts and uploaded documents before sending it to the LLM.
503503
builtin_tools (List[SystemConnectorTool]): List of builtin system connector tools to use in the ChatLLM. Using builtin tools does not require enabling tool bar (enable_tool_bar flag).
504+
mcp_server_configs (str): JSON string of MCP servers configs to use in the ChatLLM model. This should not be used with document_retrievers.
504505
"""
505506
document_retrievers: List[str] = dataclasses.field(default=None)
506507
num_completion_tokens: int = dataclasses.field(default=None)
@@ -539,6 +540,7 @@ class ChatLLMTrainingConfig(TrainingConfig):
539540
json_response_schema: str = dataclasses.field(default=None)
540541
mask_pii: bool = dataclasses.field(default=None)
541542
builtin_tools: List[SystemConnectorTool] = dataclasses.field(default=None)
543+
mcp_server_configs: str = dataclasses.field(default=None)
542544

543545
def __post_init__(self):
544546
self.problem_type = enums.ProblemType.CHAT_LLM

abacusai/audio_url_result.py

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
from .return_class import AbstractApiClass
2+
3+
4+
class AudioUrlResult(AbstractApiClass):
5+
"""
6+
TTS result
7+
8+
Args:
9+
client (ApiClient): An authenticated API Client instance
10+
audioUrl (str): The audio url.
11+
creditsUsed (float): The credits used.
12+
"""
13+
14+
def __init__(self, client, audioUrl=None, creditsUsed=None):
15+
super().__init__(client, None)
16+
self.audio_url = audioUrl
17+
self.credits_used = creditsUsed
18+
self.deprecated_keys = {}
19+
20+
def __repr__(self):
21+
repr_dict = {f'audio_url': repr(
22+
self.audio_url), f'credits_used': repr(self.credits_used)}
23+
class_name = "AudioUrlResult"
24+
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
25+
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
26+
return f"{class_name}({repr_str})"
27+
28+
def to_dict(self):
29+
"""
30+
Get a dict representation of the parameters in this class
31+
32+
Returns:
33+
dict: The dict value representation of the class parameters
34+
"""
35+
resp = {'audio_url': self.audio_url, 'credits_used': self.credits_used}
36+
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}

abacusai/batch_prediction.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ def __init__(self, client, batchPredictionId=None, createdAt=None, name=None, de
8484
BatchPredictionArgs, globalPredictionArgs)
8585
self.batch_prediction_args = client._build_class(getattr(
8686
api_class, batchPredictionArgsType, BatchPredictionArgs) if batchPredictionArgsType else BatchPredictionArgs, batchPredictionArgs)
87-
self.deprecated_keys = {'global_prediction_args', 'explanations'}
87+
self.deprecated_keys = {'explanations', 'global_prediction_args'}
8888

8989
def __repr__(self):
9090
repr_dict = {f'batch_prediction_id': repr(self.batch_prediction_id), f'created_at': repr(self.created_at), f'name': repr(self.name), f'deployment_id': repr(self.deployment_id), f'file_connector_output_location': repr(self.file_connector_output_location), f'database_connector_id': repr(self.database_connector_id), f'database_output_configuration': repr(self.database_output_configuration), f'file_output_format': repr(self.file_output_format), f'connector_type': repr(self.connector_type), f'legacy_input_location': repr(self.legacy_input_location), f'output_feature_group_id': repr(self.output_feature_group_id), f'feature_group_table_name': repr(self.feature_group_table_name), f'output_feature_group_table_name': repr(self.output_feature_group_table_name), f'summary_feature_group_table_name': repr(self.summary_feature_group_table_name), f'csv_input_prefix': repr(

abacusai/batch_prediction_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@ def __init__(self, client, batchPredictionVersion=None, batchPredictionId=None,
100100
BatchPredictionArgs, globalPredictionArgs)
101101
self.batch_prediction_args = client._build_class(getattr(
102102
api_class, batchPredictionArgsType, BatchPredictionArgs) if batchPredictionArgsType else BatchPredictionArgs, batchPredictionArgs)
103-
self.deprecated_keys = {'global_prediction_args', 'explanations'}
103+
self.deprecated_keys = {'explanations', 'global_prediction_args'}
104104

105105
def __repr__(self):
106106
repr_dict = {f'batch_prediction_version': repr(self.batch_prediction_version), f'batch_prediction_id': repr(self.batch_prediction_id), f'status': repr(self.status), f'drift_monitor_status': repr(self.drift_monitor_status), f'deployment_id': repr(self.deployment_id), f'model_id': repr(self.model_id), f'model_version': repr(self.model_version), f'predictions_started_at': repr(self.predictions_started_at), f'predictions_completed_at': repr(self.predictions_completed_at), f'database_output_error': repr(self.database_output_error), f'total_predictions': repr(self.total_predictions), f'failed_predictions': repr(self.failed_predictions), f'database_connector_id': repr(self.database_connector_id), f'database_output_configuration': repr(self.database_output_configuration), f'file_connector_output_location': repr(self.file_connector_output_location), f'file_output_format': repr(self.file_output_format), f'connector_type': repr(self.connector_type), f'legacy_input_location': repr(self.legacy_input_location), f'error': repr(self.error), f'drift_monitor_error': repr(self.drift_monitor_error), f'monitor_warnings': repr(self.monitor_warnings), f'csv_input_prefix': repr(

abacusai/client.py

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -173,6 +173,7 @@
173173
from .streaming_auth_token import StreamingAuthToken
174174
from .streaming_connector import StreamingConnector
175175
from .training_config_options import TrainingConfigOptions
176+
from .unified_connector import UnifiedConnector
176177
from .upload import Upload
177178
from .upload_part import UploadPart
178179
from .use_case import UseCase
@@ -661,7 +662,7 @@ class BaseApiClient:
661662
client_options (ClientOptions): Optional API client configurations
662663
skip_version_check (bool): If true, will skip checking the server's current API version on initializing the client
663664
"""
664-
client_version = '1.4.53'
665+
client_version = '1.4.54'
665666

666667
def __init__(self, api_key: str = None, server: str = None, client_options: ClientOptions = None, skip_version_check: bool = False, include_tb: bool = False):
667668
self.api_key = api_key
@@ -1587,7 +1588,7 @@ def list_application_connector_objects(self, application_connector_id: str) -> l
15871588
application_connector_id (str): Unique string identifier for the application connector."""
15881589
return self._call_api('listApplicationConnectorObjects', 'GET', query_params={'applicationConnectorId': application_connector_id})
15891590

1590-
def get_connector_auth(self, service: Union[ApplicationConnectorType, str] = None, application_connector_id: str = None, scopes: List = None) -> ApplicationConnector:
1591+
def get_connector_auth(self, service: Union[ApplicationConnectorType, str] = None, application_connector_id: str = None, scopes: List = None) -> UnifiedConnector:
15911592
"""Get the authentication details for a given connector. For user level connectors, the service is required. For org level connectors, the application_connector_id is required.
15921593

15931594
Args:
@@ -1596,19 +1597,19 @@ def get_connector_auth(self, service: Union[ApplicationConnectorType, str] = Non
15961597
scopes (List): The scopes to request for the connector.
15971598

15981599
Returns:
1599-
ApplicationConnector: The application connector with the authentication details."""
1600-
return self._call_api('getConnectorAuth', 'GET', query_params={'service': service, 'applicationConnectorId': application_connector_id, 'scopes': scopes}, parse_type=ApplicationConnector)
1600+
UnifiedConnector: The application connector with the authentication details."""
1601+
return self._call_api('getConnectorAuth', 'GET', query_params={'service': service, 'applicationConnectorId': application_connector_id, 'scopes': scopes}, parse_type=UnifiedConnector)
16011602

1602-
def get_user_connector_auth(self, service: Union[ApplicationConnectorType, str], scopes: List = None) -> ApplicationConnector:
1603+
def get_user_connector_auth(self, service: Union[ApplicationConnectorType, str], scopes: List = None) -> UnifiedConnector:
16031604
"""Get the authentication details for a given user level connector.
16041605

16051606
Args:
16061607
service (ApplicationConnectorType): The service name.
16071608
scopes (List): The scopes to request for the connector.
16081609

16091610
Returns:
1610-
ApplicationConnector: The application connector with the authentication details."""
1611-
return self._call_api('getUserConnectorAuth', 'GET', query_params={'service': service, 'scopes': scopes}, parse_type=ApplicationConnector)
1611+
UnifiedConnector: The application connector with the authentication details."""
1612+
return self._call_api('getUserConnectorAuth', 'GET', query_params={'service': service, 'scopes': scopes}, parse_type=UnifiedConnector)
16121613

16131614
def list_streaming_connectors(self) -> List[StreamingConnector]:
16141615
"""Retrieves a list of all streaming connectors along with their corresponding attributes.
@@ -7550,7 +7551,7 @@ def get_chat_response_with_binary_data(self, deployment_token: str, deployment_i
75507551
deployment_id, deployment_token) if deployment_token else None
75517552
return self._call_api('getChatResponseWithBinaryData', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, data={'messages': json.dumps(messages) if (messages is not None and not isinstance(messages, str)) else messages, 'llmName': json.dumps(llm_name) if (llm_name is not None and not isinstance(llm_name, str)) else llm_name, 'numCompletionTokens': json.dumps(num_completion_tokens) if (num_completion_tokens is not None and not isinstance(num_completion_tokens, str)) else num_completion_tokens, 'systemMessage': json.dumps(system_message) if (system_message is not None and not isinstance(system_message, str)) else system_message, 'temperature': json.dumps(temperature) if (temperature is not None and not isinstance(temperature, str)) else temperature, 'filterKeyValues': json.dumps(filter_key_values) if (filter_key_values is not None and not isinstance(filter_key_values, str)) else filter_key_values, 'searchScoreCutoff': json.dumps(search_score_cutoff) if (search_score_cutoff is not None and not isinstance(search_score_cutoff, str)) else search_score_cutoff, 'chatConfig': json.dumps(chat_config) if (chat_config is not None and not isinstance(chat_config, str)) else chat_config}, files=attachments, server_override=prediction_url)
75527553

7553-
def get_conversation_response(self, deployment_id: str, message: str, deployment_token: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, doc_infos: list = None, user_info: dict = None) -> Dict:
7554+
def get_conversation_response(self, deployment_id: str, message: str, deployment_token: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, doc_infos: list = None, user_info: dict = None, execute_usercode_tool: bool = False) -> Dict:
75547555
"""Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).
75557556

75567557
Args:
@@ -7566,10 +7567,11 @@ def get_conversation_response(self, deployment_id: str, message: str, deployment
75667567
filter_key_values (dict): A dictionary mapping column names to a list of values to restrict the retrieved search results.
75677568
search_score_cutoff (float): Cutoff for the document retriever score. Matching search results below this score will be ignored.
75687569
chat_config (dict): A dictionary specifying the query chat config override.
7569-
doc_infos (list): An optional list of documents used for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore."""
7570+
doc_infos (list): An optional list of documents used for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
7571+
execute_usercode_tool (bool): If True, will return the tool output in the response."""
75707572
prediction_url = self._get_prediction_endpoint(
75717573
deployment_id, deployment_token) if deployment_token else None
7572-
return self._call_api('getConversationResponse', 'POST', query_params={'deploymentId': deployment_id, 'deploymentToken': deployment_token}, body={'message': message, 'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id, 'llmName': llm_name, 'numCompletionTokens': num_completion_tokens, 'systemMessage': system_message, 'temperature': temperature, 'filterKeyValues': filter_key_values, 'searchScoreCutoff': search_score_cutoff, 'chatConfig': chat_config, 'docInfos': doc_infos, 'userInfo': user_info}, server_override=prediction_url)
7574+
return self._call_api('getConversationResponse', 'POST', query_params={'deploymentId': deployment_id, 'deploymentToken': deployment_token}, body={'message': message, 'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id, 'llmName': llm_name, 'numCompletionTokens': num_completion_tokens, 'systemMessage': system_message, 'temperature': temperature, 'filterKeyValues': filter_key_values, 'searchScoreCutoff': search_score_cutoff, 'chatConfig': chat_config, 'docInfos': doc_infos, 'userInfo': user_info, 'executeUsercodeTool': execute_usercode_tool}, server_override=prediction_url)
75737575

75747576
def get_conversation_response_with_binary_data(self, deployment_id: str, deployment_token: str, message: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, attachments: None = None) -> Dict:
75757577
"""Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).

abacusai/deployment.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -345,7 +345,7 @@ def create_realtime_monitor(self, realtime_monitor_schedule: str = None, lookbac
345345
"""
346346
return self.client.create_realtime_monitor(self.deployment_id, realtime_monitor_schedule, lookback_time)
347347

348-
def get_conversation_response(self, message: str, deployment_token: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, doc_infos: list = None, user_info: dict = None):
348+
def get_conversation_response(self, message: str, deployment_token: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, doc_infos: list = None, user_info: dict = None, execute_usercode_tool: bool = False):
349349
"""
350350
Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).
351351
@@ -362,8 +362,9 @@ def get_conversation_response(self, message: str, deployment_token: str, deploym
362362
search_score_cutoff (float): Cutoff for the document retriever score. Matching search results below this score will be ignored.
363363
chat_config (dict): A dictionary specifying the query chat config override.
364364
doc_infos (list): An optional list of documents used for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
365+
execute_usercode_tool (bool): If True, will return the tool output in the response.
365366
"""
366-
return self.client.get_conversation_response(self.deployment_id, message, deployment_token, deployment_conversation_id, external_session_id, llm_name, num_completion_tokens, system_message, temperature, filter_key_values, search_score_cutoff, chat_config, doc_infos, user_info)
367+
return self.client.get_conversation_response(self.deployment_id, message, deployment_token, deployment_conversation_id, external_session_id, llm_name, num_completion_tokens, system_message, temperature, filter_key_values, search_score_cutoff, chat_config, doc_infos, user_info, execute_usercode_tool)
367368

368369
def get_conversation_response_with_binary_data(self, deployment_token: str, message: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, attachments: None = None):
369370
"""

0 commit comments

Comments
 (0)