Skip to content

Commit f76f79a

Browse files
committed
Version 1.4.31
1 parent 01dce72 commit f76f79a

File tree

309 files changed

+3018
-1202
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

309 files changed

+3018
-1202
lines changed

abacusai/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -67,7 +67,6 @@
6767
from .deployment_statistics import DeploymentStatistics
6868
from .document_data import DocumentData
6969
from .document_retriever import DocumentRetriever
70-
from .document_retriever_config import DocumentRetrieverConfig
7170
from .document_retriever_lookup_result import DocumentRetrieverLookupResult
7271
from .document_retriever_version import DocumentRetrieverVersion
7372
from .drift_distribution import DriftDistribution
@@ -209,6 +208,7 @@
209208
from .resolved_feature_group_template import ResolvedFeatureGroupTemplate
210209
from .routing_action import RoutingAction
211210
from .schema import Schema
211+
from .sftp_key import SftpKey
212212
from .streaming_auth_token import StreamingAuthToken
213213
from .streaming_client import StreamingClient
214214
from .streaming_connector import StreamingConnector
@@ -236,4 +236,4 @@
236236
from .workflow_node_template import WorkflowNodeTemplate
237237

238238

239-
__version__ = "1.4.30"
239+
__version__ = "1.4.31"

abacusai/api_class/ai_agents.py

Lines changed: 7 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -427,6 +427,9 @@ def _raw_init(cls, name: str, input_mappings: List[WorkflowNodeInputMapping] = N
427427
elif function_name and (source_code or template_metadata):
428428
workflow_node.function_name = function_name
429429
workflow_node.source_code = source_code
430+
elif template_metadata and not template_metadata.get('initialized'):
431+
workflow_node.function_name = function_name
432+
workflow_node.source_code = source_code
430433
else:
431434
raise ValueError('workflow_graph_node', 'Either function or function_name and source_code must be provided.')
432435
workflow_node.input_mappings = input_mappings
@@ -504,6 +507,10 @@ def is_trigger_node(self):
504507

505508
@classmethod
506509
def from_dict(cls, node: dict):
510+
if node.get('template_metadata'):
511+
node['function_name'] = node.get('function_name')
512+
node['source_code'] = node.get('source_code')
513+
507514
validate_input_dict_param(node, friendly_class_name='workflow_graph_node', must_contain=['name', 'function_name', 'source_code'])
508515
_cls = cls._raw_init if node.get('__return_filter') else cls
509516
if node.get('template_metadata') and node.get('template_metadata').get('template_type') == 'trigger':

abacusai/api_class/dataset.py

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -45,6 +45,7 @@ class DocumentProcessingConfig(ApiClass):
4545
remove_watermarks (bool): Whether to remove watermarks. By default, it will be decided automatically based on the OCR mode and the document type. This option only takes effect when extract_bounding_boxes is True.
4646
convert_to_markdown (bool): Whether to convert extracted text to markdown. Defaults to False. This option only takes effect when extract_bounding_boxes is True.
4747
mask_pii (bool): Whether to mask personally identifiable information (PII) in the document text/tokens. Defaults to False.
48+
extract_images (bool): Whether to extract images from the document e.g. diagrams in a PDF page. Defaults to False.
4849
"""
4950
# NOTE: The defaults should match with clouddb.document_processing_results table defaults
5051
document_type: DocumentType = None
@@ -56,6 +57,7 @@ class DocumentProcessingConfig(ApiClass):
5657
remove_watermarks: bool = True
5758
convert_to_markdown: bool = False
5859
mask_pii: bool = False
60+
extract_images: bool = False
5961

6062
def __post_init__(self):
6163
self.ocr_mode = self._detect_ocr_mode()

abacusai/api_class/dataset_application_connector.py

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -50,8 +50,12 @@ class BoxDatasetConfig(ApplicationConnectorDatasetConfig):
5050
Dataset config for Box Application Connector
5151
Args:
5252
location (str): The regex location of the files to fetch
53+
csv_delimiter (str): If the file format is CSV, use a specific csv delimiter
54+
merge_file_schemas (bool): Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
5355
"""
5456
location: str = dataclasses.field(default=None)
57+
csv_delimiter: str = dataclasses.field(default=None)
58+
merge_file_schemas: bool = dataclasses.field(default=False)
5559

5660
def __post_init__(self):
5761
self.application_connector_type = enums.ApplicationConnectorType.BOX

abacusai/api_class/document_retriever.py

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -19,6 +19,7 @@ class VectorStoreConfig(ApiClass):
1919
index_metadata_columns (bool): If True, metadata columns of the FG will also be used for indexing and querying.
2020
use_document_summary (bool): If True, uses the summary of the document in addition to chunks of the document for indexing and querying.
2121
summary_instructions (str): Instructions for the LLM to generate the document summary.
22+
standalone_deployment (bool): If True, the document retriever will be deployed as a standalone deployment.
2223
"""
2324
chunk_size: int = dataclasses.field(default=None)
2425
chunk_overlap_fraction: float = dataclasses.field(default=None)
@@ -29,6 +30,7 @@ class VectorStoreConfig(ApiClass):
2930
index_metadata_columns: bool = dataclasses.field(default=None)
3031
use_document_summary: bool = dataclasses.field(default=None)
3132
summary_instructions: str = dataclasses.field(default=None)
33+
standalone_deployment: bool = dataclasses.field(default=False)
3234

3335

3436
DocumentRetrieverConfig = VectorStoreConfig

abacusai/api_class/enums.py

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -541,6 +541,7 @@ class PythonFunctionType(ApiEnum):
541541
FEATURE_GROUP = 'FEATURE_GROUP'
542542
PLOTLY_FIG = 'PLOTLY_FIG'
543543
STEP_FUNCTION = 'STEP_FUNCTION'
544+
USERCODE_TOOL = 'USERCODE_TOOL'
544545

545546

546547
class EvalArtifactType(ApiEnum):

abacusai/api_class/python_functions.py

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -16,12 +16,16 @@ class PythonFunctionArgument(ApiClass):
1616
is_required (bool): Whether the argument is required
1717
value (Any): The value of the argument
1818
pipeline_variable (str): The name of the pipeline variable to use as the value
19+
description (str): The description of the argument
20+
item_type (str): Type of items when variable_type is LIST
1921
"""
2022
variable_type: enums.PythonFunctionArgumentType = dataclasses.field(default=None)
2123
name: str = dataclasses.field(default=None)
2224
is_required: bool = dataclasses.field(default=True)
2325
value: Any = dataclasses.field(default=None)
2426
pipeline_variable: str = dataclasses.field(default=None)
27+
description: str = dataclasses.field(default=None)
28+
item_type: str = dataclasses.field(default=None)
2529

2630

2731
@dataclasses.dataclass

abacusai/batch_prediction.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -84,7 +84,7 @@ def __init__(self, client, batchPredictionId=None, createdAt=None, name=None, de
8484
BatchPredictionArgs, globalPredictionArgs)
8585
self.batch_prediction_args = client._build_class(getattr(
8686
api_class, batchPredictionArgsType, BatchPredictionArgs) if batchPredictionArgsType else BatchPredictionArgs, batchPredictionArgs)
87-
self.deprecated_keys = {'explanations', 'global_prediction_args'}
87+
self.deprecated_keys = {'global_prediction_args', 'explanations'}
8888

8989
def __repr__(self):
9090
repr_dict = {f'batch_prediction_id': repr(self.batch_prediction_id), f'created_at': repr(self.created_at), f'name': repr(self.name), f'deployment_id': repr(self.deployment_id), f'file_connector_output_location': repr(self.file_connector_output_location), f'database_connector_id': repr(self.database_connector_id), f'database_output_configuration': repr(self.database_output_configuration), f'file_output_format': repr(self.file_output_format), f'connector_type': repr(self.connector_type), f'legacy_input_location': repr(self.legacy_input_location), f'output_feature_group_id': repr(self.output_feature_group_id), f'feature_group_table_name': repr(self.feature_group_table_name), f'output_feature_group_table_name': repr(self.output_feature_group_table_name), f'summary_feature_group_table_name': repr(self.summary_feature_group_table_name), f'csv_input_prefix': repr(

abacusai/batch_prediction_version.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -100,7 +100,7 @@ def __init__(self, client, batchPredictionVersion=None, batchPredictionId=None,
100100
BatchPredictionArgs, globalPredictionArgs)
101101
self.batch_prediction_args = client._build_class(getattr(
102102
api_class, batchPredictionArgsType, BatchPredictionArgs) if batchPredictionArgsType else BatchPredictionArgs, batchPredictionArgs)
103-
self.deprecated_keys = {'explanations', 'global_prediction_args'}
103+
self.deprecated_keys = {'global_prediction_args', 'explanations'}
104104

105105
def __repr__(self):
106106
repr_dict = {f'batch_prediction_version': repr(self.batch_prediction_version), f'batch_prediction_id': repr(self.batch_prediction_id), f'status': repr(self.status), f'drift_monitor_status': repr(self.drift_monitor_status), f'deployment_id': repr(self.deployment_id), f'model_id': repr(self.model_id), f'model_version': repr(self.model_version), f'predictions_started_at': repr(self.predictions_started_at), f'predictions_completed_at': repr(self.predictions_completed_at), f'database_output_error': repr(self.database_output_error), f'total_predictions': repr(self.total_predictions), f'failed_predictions': repr(self.failed_predictions), f'database_connector_id': repr(self.database_connector_id), f'database_output_configuration': repr(self.database_output_configuration), f'file_connector_output_location': repr(self.file_connector_output_location), f'file_output_format': repr(self.file_output_format), f'connector_type': repr(self.connector_type), f'legacy_input_location': repr(self.legacy_input_location), f'error': repr(self.error), f'drift_monitor_error': repr(self.drift_monitor_error), f'monitor_warnings': repr(self.monitor_warnings), f'csv_input_prefix': repr(

abacusai/client.py

Lines changed: 98 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -652,7 +652,7 @@ class BaseApiClient:
652652
client_options (ClientOptions): Optional API client configurations
653653
skip_version_check (bool): If true, will skip checking the server's current API version on initializing the client
654654
"""
655-
client_version = '1.4.30'
655+
client_version = '1.4.31'
656656

657657
def __init__(self, api_key: str = None, server: str = None, client_options: ClientOptions = None, skip_version_check: bool = False, include_tb: bool = False):
658658
self.api_key = api_key
@@ -3935,13 +3935,102 @@ def get_agent_context_user_info(self):
39353935
raise ValueError(
39363936
'User information not available. Please use UI interface for this agent to work.')
39373937

3938+
def get_agent_runtime_config(self, key: str):
3939+
"""
3940+
Gets the deployment level runtime config for the agent
3941+
3942+
Args:
3943+
key(str): Key for which the config value is to be fetched
3944+
3945+
Returns:
3946+
str: Config value for the input key
3947+
"""
3948+
runtime_config = get_object_from_context(
3949+
self, _request_context, 'deployment_runtime_config', dict) or {}
3950+
return runtime_config.get(key, None)
3951+
3952+
def get_request_user_info(self):
3953+
"""
3954+
Gets the user information for the current request context.
3955+
3956+
Returns:
3957+
dict: Containing email and name of the end user.
3958+
"""
3959+
user_info = get_object_from_context(
3960+
self, _request_context, 'user_info', dict)
3961+
if user_info:
3962+
return user_info
3963+
else:
3964+
raise ValueError('User information not available')
3965+
39383966
def clear_agent_context(self):
39393967
"""
39403968
Clears the current request context.
39413969
"""
39423970
if hasattr(_request_context):
39433971
_request_context.clear()
39443972

3973+
def execute_chatllm_computer_streaming(self, computer_id: str, prompt: str, is_transient: bool = False):
3974+
"""
3975+
Executes a prompt on a remote computer and streams computer responses to the external chat UI in real-time. Must be called from agent execution context only.
3976+
3977+
Args:
3978+
computer_id (str): The ID of the computer to use for the agent.
3979+
prompt (str): The prompt to do tasks on the computer.
3980+
is_transient (bool): If True, the message will be marked as transient and will not be persisted on reload in external chatllm UI. Transient messages are useful for streaming interim updates or results.
3981+
3982+
Returns:
3983+
text (str): The text responses from the computer.
3984+
"""
3985+
request_id = self._get_agent_app_request_id()
3986+
caller = self._get_agent_async_app_caller()
3987+
proxy_caller = self._is_proxy_app_caller()
3988+
3989+
if not request_id or not caller:
3990+
raise Exception(
3991+
'This function can only be called from within an agent execution context')
3992+
3993+
if not caller.endswith('/'):
3994+
caller = caller + '/'
3995+
3996+
if proxy_caller:
3997+
api_endpoint = f'{caller}_executeChatLLMComputerStreaming'
3998+
else:
3999+
raise Exception(
4000+
'This function can only be called from within an agent execution context')
4001+
4002+
extra_args = {'stream_type': StreamType.MESSAGE.value,
4003+
'response_version': '1.0', 'is_transient': is_transient}
4004+
if hasattr(_request_context, 'agent_workflow_node_id'):
4005+
extra_args.update(
4006+
{'agent_workflow_node_id': _request_context.agent_workflow_node_id})
4007+
4008+
computer_use_args = {
4009+
'computerId': computer_id,
4010+
'prompt': prompt
4011+
}
4012+
4013+
body = {
4014+
'requestId': request_id,
4015+
'computerUseArgs': computer_use_args,
4016+
'extraArgs': extra_args,
4017+
}
4018+
body['connectionId'] = uuid4().hex
4019+
4020+
headers = {'APIKEY': self.api_key}
4021+
self._clean_api_objects(body)
4022+
for _ in range(3):
4023+
response = self._request(
4024+
api_endpoint, method='POST', body=body, headers=headers)
4025+
if response.status_code == 200:
4026+
return StreamingHandler(response.json(), _request_context, is_transient=is_transient)
4027+
elif response.status_code in (502, 503, 504):
4028+
continue
4029+
else:
4030+
break
4031+
raise Exception(
4032+
f'Error calling ChatLLM computer streaming endpoint. Status code: {response.status_code}. Response: {response.text}')
4033+
39454034
def streaming_evaluate_prompt(self, prompt: str = None, system_message: str = None, llm_name: Union[LLMName, str] = None, max_tokens: int = None, temperature: float = 0.0, messages: list = None, response_type: str = None, json_response_schema: dict = None, section_key: str = None):
39464035
"""
39474036
Generate response to the prompt using the specified model. This works the same as `evaluate_prompt` but would stream the text to the UI section while generating and returns the streamed text as an object of a `str` subclass.
@@ -7921,7 +8010,7 @@ def get_feature_group_row_process_logs_by_key(self, deployment_id: str, primary_
79218010
FeatureGroupRowProcessLogs: An object representing the logs for the feature group row process"""
79228011
return self._call_api('getFeatureGroupRowProcessLogsByKey', 'POST', query_params={'deploymentId': deployment_id}, body={'primaryKeyValue': primary_key_value}, parse_type=FeatureGroupRowProcessLogs)
79238012

7924-
def create_python_function(self, name: str, source_code: str = None, function_name: str = None, function_variable_mappings: List = None, package_requirements: list = None, function_type: str = 'FEATURE_GROUP') -> PythonFunction:
8013+
def create_python_function(self, name: str, source_code: str = None, function_name: str = None, function_variable_mappings: List = None, package_requirements: list = None, function_type: str = 'FEATURE_GROUP', description: str = None, examples: dict = None) -> PythonFunction:
79258014
"""Creates a custom Python function that is reusable.
79268015

79278016
Args:
@@ -7931,12 +8020,14 @@ def create_python_function(self, name: str, source_code: str = None, function_na
79318020
function_variable_mappings (List): List of Python function arguments.
79328021
package_requirements (list): List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
79338022
function_type (str): Type of Python function to create. Default is FEATURE_GROUP, but can also be PLOTLY_FIG.
8023+
description (str): Description of the Python function. This should include details about the function's purpose, expected inputs and outputs, and any important usage considerations or limitations.
8024+
examples (dict): Dictionary containing example use cases and anti-patterns. Should include 'positive_examples' showing recommended usage and 'negative_examples' showing cases to avoid.
79348025

79358026
Returns:
79368027
PythonFunction: The Python function that can be used (e.g. for feature group transform)."""
7937-
return self._call_api('createPythonFunction', 'POST', query_params={}, body={'name': name, 'sourceCode': source_code, 'functionName': function_name, 'functionVariableMappings': function_variable_mappings, 'packageRequirements': package_requirements, 'functionType': function_type}, parse_type=PythonFunction)
8028+
return self._call_api('createPythonFunction', 'POST', query_params={}, body={'name': name, 'sourceCode': source_code, 'functionName': function_name, 'functionVariableMappings': function_variable_mappings, 'packageRequirements': package_requirements, 'functionType': function_type, 'description': description, 'examples': examples}, parse_type=PythonFunction)
79388029

7939-
def update_python_function(self, name: str, source_code: str = None, function_name: str = None, function_variable_mappings: List = None, package_requirements: list = None) -> PythonFunction:
8030+
def update_python_function(self, name: str, source_code: str = None, function_name: str = None, function_variable_mappings: List = None, package_requirements: list = None, description: str = None, examples: dict = None) -> PythonFunction:
79408031
"""Update custom python function with user inputs for the given python function.
79418032

79428033
Args:
@@ -7945,10 +8036,12 @@ def update_python_function(self, name: str, source_code: str = None, function_na
79458036
function_name (str): The name of the Python function within `source_code`.
79468037
function_variable_mappings (List): List of arguments required by `function_name`.
79478038
package_requirements (list): List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
8039+
description (str): Description of the Python function. This should include details about the function's purpose, expected inputs and outputs, and any important usage considerations or limitations.
8040+
examples (dict): Dictionary containing example use cases and anti-patterns. Should include 'positive_examples' showing recommended usage and 'negative_examples' showing cases to avoid.
79488041

79498042
Returns:
79508043
PythonFunction: The Python function object."""
7951-
return self._call_api('updatePythonFunction', 'PATCH', query_params={}, body={'name': name, 'sourceCode': source_code, 'functionName': function_name, 'functionVariableMappings': function_variable_mappings, 'packageRequirements': package_requirements}, parse_type=PythonFunction)
8044+
return self._call_api('updatePythonFunction', 'PATCH', query_params={}, body={'name': name, 'sourceCode': source_code, 'functionName': function_name, 'functionVariableMappings': function_variable_mappings, 'packageRequirements': package_requirements, 'description': description, 'examples': examples}, parse_type=PythonFunction)
79528045

79538046
def delete_python_function(self, name: str):
79548047
"""Removes an existing Python function.

0 commit comments

Comments
 (0)