Skip to content

Commit dd67d0f

Browse files
committed
Version 1.4.43
1 parent 1060c70 commit dd67d0f

File tree

324 files changed

+6288
-240
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

324 files changed

+6288
-240
lines changed

abacusai/__init__.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,12 +34,14 @@
3434
from .chatllm_task import ChatllmTask
3535
from .client import AgentResponse, ApiClient, ApiException, ClientOptions, ReadOnlyClient, ToolResponse, _request_context
3636
from .code_agent_response import CodeAgentResponse
37+
from .code_autocomplete_edit_prediction_response import CodeAutocompleteEditPredictionResponse
3738
from .code_autocomplete_response import CodeAutocompleteResponse
3839
from .code_bot import CodeBot
3940
from .code_edit import CodeEdit
4041
from .code_edit_response import CodeEditResponse
4142
from .code_edits import CodeEdits
4243
from .code_embeddings import CodeEmbeddings
44+
from .code_llm_changed_files import CodeLlmChangedFiles
4345
from .code_source import CodeSource
4446
from .code_suggestion_validation_response import CodeSuggestionValidationResponse
4547
from .code_summary_response import CodeSummaryResponse
@@ -67,6 +69,7 @@
6769
from .dataset_column import DatasetColumn
6870
from .dataset_version import DatasetVersion
6971
from .dataset_version_logs import DatasetVersionLogs
72+
from .default_llm import DefaultLlm
7073
from .deployment import Deployment
7174
from .deployment_auth_token import DeploymentAuthToken
7275
from .deployment_conversation import DeploymentConversation
@@ -153,6 +156,7 @@
153156
from .llm_input import LlmInput
154157
from .llm_parameters import LlmParameters
155158
from .llm_response import LlmResponse
159+
from .mcp_config import McpConfig
156160
from .memory_options import MemoryOptions
157161
from .messaging_connector_response import MessagingConnectorResponse
158162
from .model import Model
@@ -248,6 +252,7 @@
248252
from .video_gen_settings import VideoGenSettings
249253
from .video_search_result import VideoSearchResult
250254
from .voice_gen_details import VoiceGenDetails
255+
from .web_app_domain import WebAppDomain
251256
from .web_page_response import WebPageResponse
252257
from .web_search_response import WebSearchResponse
253258
from .web_search_result import WebSearchResult
@@ -256,4 +261,4 @@
256261
from .workflow_node_template import WorkflowNodeTemplate
257262

258263

259-
__version__ = "1.4.42"
264+
__version__ = "1.4.43"

abacusai/api_class/ai_agents.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -495,6 +495,12 @@ def from_template(cls, template_name: str, name: str, configs: dict = None, inpu
495495
else:
496496
raise ValueError('workflow_graph_node', 'Invalid output schema. Must be a WorkflowNodeOutputSchema or a list of output section names.')
497497

498+
if sleep_time is not None:
499+
if isinstance(sleep_time, str) and sleep_time.isdigit():
500+
sleep_time = int(sleep_time)
501+
if not isinstance(sleep_time, int) or sleep_time < 0:
502+
raise ValueError('workflow_graph_node', 'Invalid sleep time. Must be a non-negative integer.')
503+
498504
return cls(
499505
name=name,
500506
input_mappings=instance_input_mappings,

abacusai/api_class/enums.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -481,7 +481,10 @@ class VectorStoreTextEncoder(ApiEnum):
481481
CODE_BERT = 'CODE_BERT'
482482

483483

484-
@deprecated_enums('OPENAI_GPT4_32K', 'OPENAI_GPT3_5', 'OPENAI_GPT3_5_TEXT', 'LLAMA3_LARGE_CHAT', 'CLAUDE_V3_OPUS', 'CLAUDE_V3_SONNET', 'OPENAI_GPT4', 'OPENAI_GPT4_128K', 'QWEN_2_5_32B_BASE')
484+
@deprecated_enums('OPENAI_GPT4_32K', 'OPENAI_GPT3_5', 'OPENAI_GPT3_5_TEXT',
485+
'OPENAI_GPT4', 'OPENAI_GPT4_128K', 'OPENAI_GPT4_128K_LATEST',
486+
'LLAMA3_LARGE_CHAT', 'CLAUDE_V3_OPUS', 'CLAUDE_V3_SONNET', 'CLAUDE_V3_HAIKU',
487+
'QWEN_2_5_32B_BASE')
485488
class LLMName(ApiEnum):
486489
OPENAI_GPT4 = 'OPENAI_GPT4'
487490
OPENAI_GPT4_32K = 'OPENAI_GPT4_32K'

abacusai/batch_prediction.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ def __init__(self, client, batchPredictionId=None, createdAt=None, name=None, de
8484
BatchPredictionArgs, globalPredictionArgs)
8585
self.batch_prediction_args = client._build_class(getattr(
8686
api_class, batchPredictionArgsType, BatchPredictionArgs) if batchPredictionArgsType else BatchPredictionArgs, batchPredictionArgs)
87-
self.deprecated_keys = {'explanations', 'global_prediction_args'}
87+
self.deprecated_keys = {'global_prediction_args', 'explanations'}
8888

8989
def __repr__(self):
9090
repr_dict = {f'batch_prediction_id': repr(self.batch_prediction_id), f'created_at': repr(self.created_at), f'name': repr(self.name), f'deployment_id': repr(self.deployment_id), f'file_connector_output_location': repr(self.file_connector_output_location), f'database_connector_id': repr(self.database_connector_id), f'database_output_configuration': repr(self.database_output_configuration), f'file_output_format': repr(self.file_output_format), f'connector_type': repr(self.connector_type), f'legacy_input_location': repr(self.legacy_input_location), f'output_feature_group_id': repr(self.output_feature_group_id), f'feature_group_table_name': repr(self.feature_group_table_name), f'output_feature_group_table_name': repr(self.output_feature_group_table_name), f'summary_feature_group_table_name': repr(self.summary_feature_group_table_name), f'csv_input_prefix': repr(

abacusai/batch_prediction_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@ def __init__(self, client, batchPredictionVersion=None, batchPredictionId=None,
100100
BatchPredictionArgs, globalPredictionArgs)
101101
self.batch_prediction_args = client._build_class(getattr(
102102
api_class, batchPredictionArgsType, BatchPredictionArgs) if batchPredictionArgsType else BatchPredictionArgs, batchPredictionArgs)
103-
self.deprecated_keys = {'explanations', 'global_prediction_args'}
103+
self.deprecated_keys = {'global_prediction_args', 'explanations'}
104104

105105
def __repr__(self):
106106
repr_dict = {f'batch_prediction_version': repr(self.batch_prediction_version), f'batch_prediction_id': repr(self.batch_prediction_id), f'status': repr(self.status), f'drift_monitor_status': repr(self.drift_monitor_status), f'deployment_id': repr(self.deployment_id), f'model_id': repr(self.model_id), f'model_version': repr(self.model_version), f'predictions_started_at': repr(self.predictions_started_at), f'predictions_completed_at': repr(self.predictions_completed_at), f'database_output_error': repr(self.database_output_error), f'total_predictions': repr(self.total_predictions), f'failed_predictions': repr(self.failed_predictions), f'database_connector_id': repr(self.database_connector_id), f'database_output_configuration': repr(self.database_output_configuration), f'file_connector_output_location': repr(self.file_connector_output_location), f'file_output_format': repr(self.file_output_format), f'connector_type': repr(self.connector_type), f'legacy_input_location': repr(self.legacy_input_location), f'error': repr(self.error), f'drift_monitor_error': repr(self.drift_monitor_error), f'monitor_warnings': repr(self.monitor_warnings), f'csv_input_prefix': repr(

abacusai/client.py

Lines changed: 61 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -213,7 +213,7 @@ async def sse_asynchronous_generator(endpoint: str, headers: dict, body: dict):
213213
except Exception:
214214
raise Exception('Please install aiohttp to use this functionality')
215215

216-
async with aiohttp.request('POST', endpoint, json=body, headers=headers) as response:
216+
async with aiohttp.request('POST', endpoint, json=body, headers=headers, timeout=aiohttp.ClientTimeout(total=0)) as response:
217217
async for line in response.content:
218218
if line:
219219
streamed_responses = line.decode('utf-8').split('\n\n')
@@ -661,7 +661,7 @@ class BaseApiClient:
661661
client_options (ClientOptions): Optional API client configurations
662662
skip_version_check (bool): If true, will skip checking the server's current API version on initializing the client
663663
"""
664-
client_version = '1.4.42'
664+
client_version = '1.4.43'
665665

666666
def __init__(self, api_key: str = None, server: str = None, client_options: ClientOptions = None, skip_version_check: bool = False, include_tb: bool = False):
667667
self.api_key = api_key
@@ -2838,6 +2838,13 @@ def copy_agent(self, agent_id: str, project_id: str = None) -> Agent:
28382838
Agent: The newly generated agent."""
28392839
return self._call_api('copyAgent', 'GET', query_params={'agentId': agent_id, 'projectId': project_id}, parse_type=Agent)
28402840

2841+
def sdk_link_hosted_app(self, app: str = None) -> io.BytesIO:
    """Returns custom SDK JS for Widget JS

    Args:
        app (str): Application ID to be used as appId"""
    # Streamed response: the raw JS bundle comes back as a BytesIO.
    query_params = {'app': app}
    return self._call_api('sdkLinkHostedApp', 'GET', query_params=query_params, streamable_response=True)
28412848
def list_llm_apps(self) -> List[LlmApp]:
28422849
"""Lists all available LLM Apps, which are LLMs tailored to achieve a specific task like code generation for a specific service's API.
28432850

@@ -3035,6 +3042,47 @@ def _serialize_df_with_dtypes(df):
30353042
deployment_token=deployment_token, deployment_id=deployment_id, query_data=query_data, solve_time_limit_seconds=solve_time_limit_seconds, optimality_gap_limit=optimality_gap_limit)
30363043
return result
30373044

3045+
def get_optimisation_input_dataframes_with_new_inputs(self, deployment_token: str, deployment_id: str, query_data: dict):
    """
    Get assignments for given query, with new inputs

    Bug fix: the method body uses ``self`` (to delegate to the serialized-inputs
    API) but the signature previously omitted it, so any instance call raised
    ``NameError: name 'self' is not defined``. ``self`` is now the first parameter,
    matching every sibling client method.

    Args:
        deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
        deployment_id (str): The unique identifier of a deployment created under the project.
        query_data (dict): a dictionary with various key: value pairs corresponding to various updated FGs in the FG tree, which we want to update to compute new top level FGs for online solve. values are dataframes and keys are their names. Names should be same as the ones used during training.

    Returns:
        OptimizationAssignment: The output dataframes for a given query.
    """
    def _serialize_df_with_dtypes(df):
        # Bundle a DataFrame and its dtypes into one JSON string so the
        # server can reconstruct the frame with the original column types.
        dtypes_dict = df.dtypes.apply(lambda x: str(x)).to_dict()

        # Collapse pandas-specific dtype spellings to simple tags.
        for col, dtype in dtypes_dict.items():
            if 'datetime' in dtype.lower():
                dtypes_dict[col] = 'datetime'
            elif 'category' in dtype.lower():
                dtypes_dict[col] = 'category'

        # ISO date format keeps datetimes round-trippable through JSON.
        json_data = df.to_json(date_format='iso')

        return json.dumps({
            'data': json_data,
            'dtypes': dtypes_dict
        })

    query_data = {name: _serialize_df_with_dtypes(df)
                  for name, df in query_data.items()}

    # NOTE(review): delegates to get_optimisation_input_dataframes_with_new_serialized_inputs,
    # which is not defined in this change (the similarly named
    # get_optimisation_inputs_from_serialized was added instead) — confirm the
    # target method exists on the client.
    result = self.get_optimisation_input_dataframes_with_new_serialized_inputs(
        deployment_token=deployment_token, deployment_id=deployment_id, query_data=query_data)
    return result
3085+
30383086
def create_dataset_version_from_pandas(self, table_name_or_id: str, df: pd.DataFrame, clean_column_names: bool = False) -> Dataset:
30393087
"""
30403088
[Deprecated]
@@ -7593,6 +7641,17 @@ def get_alternative_assignments(self, deployment_token: str, deployment_id: str,
75937641
deployment_id, deployment_token) if deployment_token else None
75947642
return self._call_api('getAlternativeAssignments', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data, 'addConstraints': add_constraints, 'solveTimeLimitSeconds': solve_time_limit_seconds, 'bestAlternateOnly': best_alternate_only}, server_override=prediction_url)
75957643

7644+
def get_optimisation_inputs_from_serialized(self, deployment_token: str, deployment_id: str, query_data: dict = None) -> Dict:
7645+
"""Get assignments for given query, with new inputs
7646+
7647+
Args:
7648+
deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
7649+
deployment_id (str): The unique identifier of a deployment created under the project.
7650+
query_data (dict): a dictionary with various key: value pairs corresponding to various updated FGs in the FG tree, which we want to update to compute new top level FGs for online solve. (query data will be dict of names: serialized dataframes)"""
7651+
prediction_url = self._get_prediction_endpoint(
7652+
deployment_id, deployment_token) if deployment_token else None
7653+
return self._call_api('getOptimisationInputsFromSerialized', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data}, server_override=prediction_url)
7654+
75967655
def get_assignments_online_with_new_serialized_inputs(self, deployment_token: str, deployment_id: str, query_data: dict = None, solve_time_limit_seconds: float = None, optimality_gap_limit: float = None) -> Dict:
75977656
"""Get assignments for given query, with new inputs
75987657

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
from .return_class import AbstractApiClass
2+
3+
4+
class CodeAutocompleteEditPredictionResponse(AbstractApiClass):
5+
"""
6+
A autocomplete response from an LLM
7+
8+
Args:
9+
client (ApiClient): An authenticated API Client instance
10+
autocompleteResponse (str): autocomplete code
11+
showAutocomplete (bool): Whether to show autocomplete in the client
12+
"""
13+
14+
def __init__(self, client, autocompleteResponse=None, showAutocomplete=None):
15+
super().__init__(client, None)
16+
self.autocomplete_response = autocompleteResponse
17+
self.show_autocomplete = showAutocomplete
18+
self.deprecated_keys = {}
19+
20+
def __repr__(self):
21+
repr_dict = {f'autocomplete_response': repr(
22+
self.autocomplete_response), f'show_autocomplete': repr(self.show_autocomplete)}
23+
class_name = "CodeAutocompleteEditPredictionResponse"
24+
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
25+
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
26+
return f"{class_name}({repr_str})"
27+
28+
def to_dict(self):
29+
"""
30+
Get a dict representation of the parameters in this class
31+
32+
Returns:
33+
dict: The dict value representation of the class parameters
34+
"""
35+
resp = {'autocomplete_response': self.autocomplete_response,
36+
'show_autocomplete': self.show_autocomplete}
37+
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}

abacusai/code_llm_changed_files.py

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
from .return_class import AbstractApiClass
2+
3+
4+
class CodeLlmChangedFiles(AbstractApiClass):
5+
"""
6+
Code changed files
7+
8+
Args:
9+
client (ApiClient): An authenticated API Client instance
10+
addedFiles (list): A list of added file paths.
11+
updatedFiles (list): A list of updated file paths.
12+
deletedFiles (list): A list of deleted file paths.
13+
"""
14+
15+
def __init__(self, client, addedFiles=None, updatedFiles=None, deletedFiles=None):
16+
super().__init__(client, None)
17+
self.added_files = addedFiles
18+
self.updated_files = updatedFiles
19+
self.deleted_files = deletedFiles
20+
self.deprecated_keys = {}
21+
22+
def __repr__(self):
23+
repr_dict = {f'added_files': repr(self.added_files), f'updated_files': repr(
24+
self.updated_files), f'deleted_files': repr(self.deleted_files)}
25+
class_name = "CodeLlmChangedFiles"
26+
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
27+
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
28+
return f"{class_name}({repr_str})"
29+
30+
def to_dict(self):
31+
"""
32+
Get a dict representation of the parameters in this class
33+
34+
Returns:
35+
dict: The dict value representation of the class parameters
36+
"""
37+
resp = {'added_files': self.added_files,
38+
'updated_files': self.updated_files, 'deleted_files': self.deleted_files}
39+
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}

abacusai/default_llm.py

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
from .return_class import AbstractApiClass
2+
3+
4+
class DefaultLlm(AbstractApiClass):
5+
"""
6+
A default LLM.
7+
8+
Args:
9+
client (ApiClient): An authenticated API Client instance
10+
name (str): The name of the LLM.
11+
enum (str): The enum of the LLM.
12+
"""
13+
14+
def __init__(self, client, name=None, enum=None):
15+
super().__init__(client, None)
16+
self.name = name
17+
self.enum = enum
18+
self.deprecated_keys = {}
19+
20+
def __repr__(self):
21+
repr_dict = {f'name': repr(self.name), f'enum': repr(self.enum)}
22+
class_name = "DefaultLlm"
23+
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
24+
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
25+
return f"{class_name}({repr_str})"
26+
27+
def to_dict(self):
28+
"""
29+
Get a dict representation of the parameters in this class
30+
31+
Returns:
32+
dict: The dict value representation of the class parameters
33+
"""
34+
resp = {'name': self.name, 'enum': self.enum}
35+
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}

0 commit comments

Comments
 (0)