Skip to content

Commit 9e779dc

Browse files
Version 1.4.48
1 parent 9fcf762 commit 9e779dc

File tree

329 files changed

+3955
-1141
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

329 files changed

+3955
-1141
lines changed

abacusai/__init__.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -253,6 +253,8 @@
253253
from .use_case_requirements import UseCaseRequirements
254254
from .user import User
255255
from .user_exception import UserException
256+
from .video_gen_model import VideoGenModel
257+
from .video_gen_model_options import VideoGenModelOptions
256258
from .video_gen_settings import VideoGenSettings
257259
from .video_search_result import VideoSearchResult
258260
from .voice_gen_details import VoiceGenDetails
@@ -265,4 +267,4 @@
265267
from .workflow_node_template import WorkflowNodeTemplate
266268

267269

268-
__version__ = "1.4.47"
270+
__version__ = "1.4.48"

abacusai/api_class/dataset_application_connector.py

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -30,15 +30,14 @@ class ConfluenceDatasetConfig(ApplicationConnectorDatasetConfig):
3030
Dataset config for Confluence Application Connector
3131
Args:
3232
location (str): The location of the pages to fetch
33-
space_key (str): The space key of the space from which we fetch pages
3433
pull_attachments (bool): Whether to pull attachments for each page
3534
extract_bounding_boxes (bool): Whether to extract bounding boxes from the documents
36-
35+
location_type (str): The type of location to be fetched. Maps values in `location` to content type, example: 'spaceKey/folderTitle/*' -> 'folder'
3736
"""
3837
location: str = dataclasses.field(default=None)
39-
space_key: str = dataclasses.field(default=None)
4038
pull_attachments: bool = dataclasses.field(default=False)
4139
extract_bounding_boxes: bool = dataclasses.field(default=False) # TODO: Deprecate in favour of document_processing_config
40+
location_type: str = dataclasses.field(default=None)
4241

4342
def __post_init__(self):
4443
self.application_connector_type = enums.ApplicationConnectorType.CONFLUENCE
@@ -106,9 +105,15 @@ class JiraDatasetConfig(ApplicationConnectorDatasetConfig):
106105
107106
Args:
108107
jql (str): The JQL query for fetching issues
108+
custom_fields (list): A list of custom fields to include in the dataset
109+
include_comments (bool): Fetch comments for each issue
110+
include_watchers (bool): Fetch watchers for each issue
109111
110112
"""
111113
jql: str = dataclasses.field(default=None)
114+
custom_fields: list = dataclasses.field(default=None)
115+
include_comments: bool = dataclasses.field(default=False)
116+
include_watchers: bool = dataclasses.field(default=False)
112117

113118
def __post_init__(self):
114119
self.application_connector_type = enums.ApplicationConnectorType.JIRA

abacusai/api_class/enums.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -507,6 +507,8 @@ class LLMName(ApiEnum):
507507
CLAUDE_V3_HAIKU = 'CLAUDE_V3_HAIKU'
508508
CLAUDE_V3_5_SONNET = 'CLAUDE_V3_5_SONNET'
509509
CLAUDE_V3_7_SONNET = 'CLAUDE_V3_7_SONNET'
510+
CLAUDE_V4_SONNET = 'CLAUDE_V4_SONNET'
511+
CLAUDE_V4_OPUS = 'CLAUDE_V4_OPUS'
510512
CLAUDE_V3_5_HAIKU = 'CLAUDE_V3_5_HAIKU'
511513
GEMINI_1_5_PRO = 'GEMINI_1_5_PRO'
512514
GEMINI_2_FLASH = 'GEMINI_2_FLASH'

abacusai/chatllm_task.py

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -8,36 +8,42 @@ class ChatllmTask(AbstractApiClass):
88
Args:
99
client (ApiClient): An authenticated API Client instance
1010
chatllmTaskId (str): The id of the chatllm task.
11+
daemonTaskId (str): The id of the daemon task.
12+
taskType (str): The type of task ('chatllm' or 'daemon').
1113
name (str): The name of the chatllm task.
1214
instructions (str): The instructions of the chatllm task.
1315
lifecycle (str): The lifecycle of the chatllm task.
1416
scheduleInfo (dict): The schedule info of the chatllm task.
1517
externalApplicationId (str): The external application id associated with the chatllm task.
1618
deploymentConversationId (str): The deployment conversation id associated with the chatllm task.
19+
sourceDeploymentConversationId (str): The source deployment conversation id associated with the chatllm task.
1720
enableEmailAlerts (bool): Whether email alerts are enabled for the chatllm task.
1821
email (str): The email to send alerts to.
1922
numUnreadTaskInstances (int): The number of unread task instances for the chatllm task.
2023
computePointsUsed (int): The compute points used for the chatllm task.
2124
"""
2225

23-
def __init__(self, client, chatllmTaskId=None, name=None, instructions=None, lifecycle=None, scheduleInfo=None, externalApplicationId=None, deploymentConversationId=None, enableEmailAlerts=None, email=None, numUnreadTaskInstances=None, computePointsUsed=None):
26+
def __init__(self, client, chatllmTaskId=None, daemonTaskId=None, taskType=None, name=None, instructions=None, lifecycle=None, scheduleInfo=None, externalApplicationId=None, deploymentConversationId=None, sourceDeploymentConversationId=None, enableEmailAlerts=None, email=None, numUnreadTaskInstances=None, computePointsUsed=None):
2427
super().__init__(client, chatllmTaskId)
2528
self.chatllm_task_id = chatllmTaskId
29+
self.daemon_task_id = daemonTaskId
30+
self.task_type = taskType
2631
self.name = name
2732
self.instructions = instructions
2833
self.lifecycle = lifecycle
2934
self.schedule_info = scheduleInfo
3035
self.external_application_id = externalApplicationId
3136
self.deployment_conversation_id = deploymentConversationId
37+
self.source_deployment_conversation_id = sourceDeploymentConversationId
3238
self.enable_email_alerts = enableEmailAlerts
3339
self.email = email
3440
self.num_unread_task_instances = numUnreadTaskInstances
3541
self.compute_points_used = computePointsUsed
3642
self.deprecated_keys = {}
3743

3844
def __repr__(self):
39-
repr_dict = {f'chatllm_task_id': repr(self.chatllm_task_id), f'name': repr(self.name), f'instructions': repr(self.instructions), f'lifecycle': repr(self.lifecycle), f'schedule_info': repr(self.schedule_info), f'external_application_id': repr(self.external_application_id), f'deployment_conversation_id': repr(
40-
self.deployment_conversation_id), f'enable_email_alerts': repr(self.enable_email_alerts), f'email': repr(self.email), f'num_unread_task_instances': repr(self.num_unread_task_instances), f'compute_points_used': repr(self.compute_points_used)}
45+
repr_dict = {f'chatllm_task_id': repr(self.chatllm_task_id), f'daemon_task_id': repr(self.daemon_task_id), f'task_type': repr(self.task_type), f'name': repr(self.name), f'instructions': repr(self.instructions), f'lifecycle': repr(self.lifecycle), f'schedule_info': repr(self.schedule_info), f'external_application_id': repr(self.external_application_id), f'deployment_conversation_id': repr(
46+
self.deployment_conversation_id), f'source_deployment_conversation_id': repr(self.source_deployment_conversation_id), f'enable_email_alerts': repr(self.enable_email_alerts), f'email': repr(self.email), f'num_unread_task_instances': repr(self.num_unread_task_instances), f'compute_points_used': repr(self.compute_points_used)}
4147
class_name = "ChatllmTask"
4248
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
4349
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -50,6 +56,6 @@ def to_dict(self):
5056
Returns:
5157
dict: The dict value representation of the class parameters
5258
"""
53-
resp = {'chatllm_task_id': self.chatllm_task_id, 'name': self.name, 'instructions': self.instructions, 'lifecycle': self.lifecycle, 'schedule_info': self.schedule_info, 'external_application_id': self.external_application_id,
54-
'deployment_conversation_id': self.deployment_conversation_id, 'enable_email_alerts': self.enable_email_alerts, 'email': self.email, 'num_unread_task_instances': self.num_unread_task_instances, 'compute_points_used': self.compute_points_used}
59+
resp = {'chatllm_task_id': self.chatllm_task_id, 'daemon_task_id': self.daemon_task_id, 'task_type': self.task_type, 'name': self.name, 'instructions': self.instructions, 'lifecycle': self.lifecycle, 'schedule_info': self.schedule_info, 'external_application_id': self.external_application_id,
60+
'deployment_conversation_id': self.deployment_conversation_id, 'source_deployment_conversation_id': self.source_deployment_conversation_id, 'enable_email_alerts': self.enable_email_alerts, 'email': self.email, 'num_unread_task_instances': self.num_unread_task_instances, 'compute_points_used': self.compute_points_used}
5561
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}

abacusai/client.py

Lines changed: 9 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -661,7 +661,7 @@ class BaseApiClient:
661661
client_options (ClientOptions): Optional API client configurations
662662
skip_version_check (bool): If true, will skip checking the server's current API version on initializing the client
663663
"""
664-
client_version = '1.4.47'
664+
client_version = '1.4.48'
665665

666666
def __init__(self, api_key: str = None, server: str = None, client_options: ClientOptions = None, skip_version_check: bool = False, include_tb: bool = False):
667667
self.api_key = api_key
@@ -855,8 +855,9 @@ def _proxy_request(self, name: str, method: str = 'POST', query_params: dict = N
855855
headers = {'APIKEY': self.api_key}
856856
deployment_id = os.getenv('ABACUS_EXEC_SERVICE_DEPLOYMENT_ID')
857857
if deployment_id:
858-
query_params = {**(query_params or {}),
859-
'environmentDeploymentId': deployment_id}
858+
if not (query_params or {}).get('deploymentId') or deployment_id == (query_params or {}).get('deploymentId'):
859+
query_params = {**(query_params or {}),
860+
'environmentDeploymentId': deployment_id}
860861
caller = self._get_agent_caller()
861862
request_id = self._get_agent_app_request_id()
862863
if caller and request_id:
@@ -870,7 +871,7 @@ def _proxy_request(self, name: str, method: str = 'POST', query_params: dict = N
870871
self, _request_context, 'deployment_id', str)
871872
if hashed_deployment_id:
872873
query_params = {**(query_params or {}),
873-
'deploymentId': hashed_deployment_id}
874+
'billingDeploymentId': hashed_deployment_id}
874875
user_info = get_object_from_context(
875876
self, _request_context, 'user_info', dict)
876877
if user_info:
@@ -7836,9 +7837,7 @@ def execute_agent(self, deployment_token: str, deployment_id: str, arguments: li
78367837
deployment_id (str): A unique string identifier for the deployment created under the project.
78377838
arguments (list): Positional arguments to the agent execute function.
78387839
keyword_arguments (dict): A dictionary where each 'key' represents the parameter name and its corresponding 'value' represents the value of that parameter for the agent execute function."""
7839-
prediction_url = self._get_prediction_endpoint(
7840-
deployment_id, deployment_token) if deployment_token else None
7841-
return self._call_api('executeAgent', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'arguments': arguments, 'keywordArguments': keyword_arguments}, server_override=prediction_url, timeout=1500)
7840+
return self._proxy_request('executeAgent', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'arguments': arguments, 'keywordArguments': keyword_arguments}, is_sync=True)
78427841

78437842
def get_matrix_agent_schema(self, deployment_token: str, deployment_id: str, query: str, doc_infos: list = None, deployment_conversation_id: str = None, external_session_id: str = None) -> Dict:
78447843
"""Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.
@@ -7867,9 +7866,7 @@ def execute_conversation_agent(self, deployment_token: str, deployment_id: str,
78677866
regenerate (bool): If True, will regenerate the response from the last query.
78687867
doc_infos (list): An optional list of documents use for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
78697868
agent_workflow_node_id (str): An optional agent workflow node id to trigger agent execution from an intermediate node."""
7870-
prediction_url = self._get_prediction_endpoint(
7871-
deployment_id, deployment_token) if deployment_token else None
7872-
return self._call_api('executeConversationAgent', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'arguments': arguments, 'keywordArguments': keyword_arguments, 'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id, 'regenerate': regenerate, 'docInfos': doc_infos, 'agentWorkflowNodeId': agent_workflow_node_id}, server_override=prediction_url)
7869+
return self._proxy_request('executeSyncConversationAgent', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'arguments': arguments, 'keywordArguments': keyword_arguments, 'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id, 'regenerate': regenerate, 'docInfos': doc_infos, 'agentWorkflowNodeId': agent_workflow_node_id}, is_sync=True)
78737870

78747871
def lookup_matches(self, deployment_token: str, deployment_id: str, data: str = None, filters: dict = None, num: int = None, result_columns: list = None, max_words: int = None, num_retrieval_margin_words: int = None, max_words_per_chunk: int = None, score_multiplier_column: str = None, min_score: float = None, required_phrases: list = None, filter_clause: str = None, crowding_limits: dict = None, include_text_search: bool = False) -> List[DocumentRetrieverLookupResult]:
78757872
"""Lookup document retrievers and return the matching documents from the document retriever deployed with given query.
@@ -7913,7 +7910,7 @@ def get_completion(self, deployment_token: str, deployment_id: str, prompt: str)
79137910
deployment_id, deployment_token) if deployment_token else None
79147911
return self._call_api('getCompletion', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'prompt': prompt}, server_override=prediction_url)
79157912

7916-
def execute_agent_with_binary_data(self, deployment_token: str, deployment_id: str, arguments: list = None, keyword_arguments: dict = None, deployment_conversation_id: str = None, external_session_id: str = None, blobs: None = None) -> Dict:
7913+
def execute_agent_with_binary_data(self, deployment_token: str, deployment_id: str, arguments: list = None, keyword_arguments: dict = None, deployment_conversation_id: str = None, external_session_id: str = None, blobs: None = None) -> AgentDataExecutionResult:
79177914
"""Executes a deployed AI agent function with binary data as inputs.
79187915

79197916
Args:
@@ -7927,9 +7924,7 @@ def execute_agent_with_binary_data(self, deployment_token: str, deployment_id: s
79277924

79287925
Returns:
79297926
AgentDataExecutionResult: The result of the agent execution"""
7930-
prediction_url = self._get_prediction_endpoint(
7931-
deployment_id, deployment_token) if deployment_token else None
7932-
return self._call_api('executeAgentWithBinaryData', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, data={'arguments': json.dumps(arguments) if (arguments is not None and not isinstance(arguments, str)) else arguments, 'keywordArguments': json.dumps(keyword_arguments) if (keyword_arguments is not None and not isinstance(keyword_arguments, str)) else keyword_arguments, 'deploymentConversationId': json.dumps(deployment_conversation_id) if (deployment_conversation_id is not None and not isinstance(deployment_conversation_id, str)) else deployment_conversation_id, 'externalSessionId': json.dumps(external_session_id) if (external_session_id is not None and not isinstance(external_session_id, str)) else external_session_id}, parse_type=AgentDataExecutionResult, files=blobs, server_override=prediction_url, timeout=1500)
7927+
return self._proxy_request('executeAgentWithBinaryData', 'POST', query_params={}, data={'deploymentToken': deployment_token, 'deploymentId': deployment_id, 'arguments': arguments, 'keywordArguments': json.dumps(keyword_arguments.to_dict()) if hasattr(keyword_arguments, 'to_dict') else json.dumps(keyword_arguments), 'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id}, files=blobs, parse_type=AgentDataExecutionResult, is_sync=True)
79337928

79347929
def start_autonomous_agent(self, deployment_token: str, deployment_id: str, arguments: list = None, keyword_arguments: dict = None, save_conversations: bool = True) -> Dict:
79357930
"""Starts a deployed Autonomous agent associated with the given deployment_conversation_id using the arguments and keyword arguments as inputs for execute function of trigger node.

0 commit comments

Comments
 (0)