diff --git a/.sdk-version b/.sdk-version
index 3d7e843..f9eccc9 100644
--- a/.sdk-version
+++ b/.sdk-version
@@ -1 +1 @@
-v1.89.4
+v1.91.1
diff --git a/README.md b/README.md
index 59c6f0a..3b260eb 100644
--- a/README.md
+++ b/README.md
@@ -74,7 +74,6 @@ Class | Method | HTTP request | Description
 *AnalysesResultsMetadataApi* | [**get_pdf**](docs/AnalysesResultsMetadataApi.md#get_pdf) | **GET** /v2/analyses/{analysis_id}/pdf | Gets the PDF found in the analysis
 *AnalysesResultsMetadataApi* | [**get_sbom**](docs/AnalysesResultsMetadataApi.md#get_sbom) | **GET** /v2/analyses/{analysis_id}/sbom | Gets the software-bill-of-materials (SBOM) found in the analysis
 *AnalysesResultsMetadataApi* | [**get_tags**](docs/AnalysesResultsMetadataApi.md#get_tags) | **GET** /v2/analyses/{analysis_id}/tags | Get function tags with maliciousness score
-*AnalysesResultsMetadataApi* | [**get_threat_score**](docs/AnalysesResultsMetadataApi.md#get_threat_score) | **GET** /v2/analyses/{analysis_id}/threat_score | Gets the threat score found in the analysis
 *AnalysesResultsMetadataApi* | [**get_vulnerabilities**](docs/AnalysesResultsMetadataApi.md#get_vulnerabilities) | **GET** /v2/analyses/{analysis_id}/vulnerabilities | Gets the vulnerabilities found in the analysis
 *AnalysesSecurityChecksApi* | [**create_scurity_checks_task**](docs/AnalysesSecurityChecksApi.md#create_scurity_checks_task) | **POST** /v2/analyses/{analysis_id}/security-checks | Queues a security check process
 *AnalysesSecurityChecksApi* | [**get_security_checks**](docs/AnalysesSecurityChecksApi.md#get_security_checks) | **GET** /v2/analyses/{analysis_id}/security-checks | Get Security Checks
@@ -151,8 +150,6 @@ Class | Method | HTTP request | Description
 *FunctionsRenamingHistoryApi* | [**get_function_name_history**](docs/FunctionsRenamingHistoryApi.md#get_function_name_history) | **GET** /v2/functions/history/{function_id} | Get Function Name History
 *FunctionsRenamingHistoryApi* | [**rename_function_id**](docs/FunctionsRenamingHistoryApi.md#rename_function_id) | **POST** /v2/functions/rename/{function_id} | Rename Function
 *FunctionsRenamingHistoryApi* | [**revert_function_name**](docs/FunctionsRenamingHistoryApi.md#revert_function_name) | **POST** /v2/functions/history/{function_id}/{history_id} | Revert the function name
-*FunctionsThreatScoreApi* | [**get_all_function_threat_scores**](docs/FunctionsThreatScoreApi.md#get_all_function_threat_scores) | **GET** /v2/analyses/{analysis_id}/functions/threat_score | Gets the threat score for all functions
-*FunctionsThreatScoreApi* | [**get_individual_function_threat_score**](docs/FunctionsThreatScoreApi.md#get_individual_function_threat_score) | **GET** /v2/analyses/{analysis_id}/functions/{function_id}/threat_score | Gets the threat score analysis
 *ModelsApi* | [**get_models**](docs/ModelsApi.md#get_models) | **GET** /v2/models | Gets models
 *SearchApi* | [**search_binaries**](docs/SearchApi.md#search_binaries) | **GET** /v2/search/binaries | Binaries search
 *SearchApi* | [**search_collections**](docs/SearchApi.md#search_collections) | **GET** /v2/search/collections | Collections search
@@ -177,7 +174,6 @@ Class | Method | HTTP request | Description
  - [AnalysisScope](docs/AnalysisScope.md)
  - [AnalysisStringsResponse](docs/AnalysisStringsResponse.md)
  - [AnalysisTags](docs/AnalysisTags.md)
- - [AnalysisThreatScoreData](docs/AnalysisThreatScoreData.md)
  - [AnalysisUpdateRequest](docs/AnalysisUpdateRequest.md)
  - [AnalysisUpdateTagsRequest](docs/AnalysisUpdateTagsRequest.md)
  - [AnalysisUpdateTagsResponse](docs/AnalysisUpdateTagsResponse.md)
@@ -201,7 +197,6 @@ Class | Method | HTTP request | Description
  - [BaseResponseAnalysisFunctions](docs/BaseResponseAnalysisFunctions.md)
  - [BaseResponseAnalysisStringsResponse](docs/BaseResponseAnalysisStringsResponse.md)
  - [BaseResponseAnalysisTags](docs/BaseResponseAnalysisTags.md)
- - [BaseResponseAnalysisThreatScoreData](docs/BaseResponseAnalysisThreatScoreData.md)
  - [BaseResponseAnalysisUpdateTagsResponse](docs/BaseResponseAnalysisUpdateTagsResponse.md)
  - [BaseResponseBasic](docs/BaseResponseBasic.md)
  - [BaseResponseBinaryAdditionalResponse](docs/BaseResponseBinaryAdditionalResponse.md)
@@ -228,7 +223,6 @@ Class | Method | HTTP request | Description
  - [BaseResponseDict](docs/BaseResponseDict.md)
  - [BaseResponseDynamicExecutionStatus](docs/BaseResponseDynamicExecutionStatus.md)
  - [BaseResponseExternalResponse](docs/BaseResponseExternalResponse.md)
- - [BaseResponseFunctionAnalysisThreatScoreData](docs/BaseResponseFunctionAnalysisThreatScoreData.md)
  - [BaseResponseFunctionBlocksResponse](docs/BaseResponseFunctionBlocksResponse.md)
  - [BaseResponseFunctionCapabilityResponse](docs/BaseResponseFunctionCapabilityResponse.md)
  - [BaseResponseFunctionDataTypes](docs/BaseResponseFunctionDataTypes.md)
@@ -236,7 +230,6 @@ Class | Method | HTTP request | Description
  - [BaseResponseFunctionSearchResponse](docs/BaseResponseFunctionSearchResponse.md)
  - [BaseResponseFunctionStringsResponse](docs/BaseResponseFunctionStringsResponse.md)
  - [BaseResponseFunctionTaskResponse](docs/BaseResponseFunctionTaskResponse.md)
- - [BaseResponseFunctionThreatScore](docs/BaseResponseFunctionThreatScore.md)
  - [BaseResponseFunctionsDetailResponse](docs/BaseResponseFunctionsDetailResponse.md)
  - [BaseResponseGenerateFunctionDataTypes](docs/BaseResponseGenerateFunctionDataTypes.md)
  - [BaseResponseGenerationStatusList](docs/BaseResponseGenerationStatusList.md)
@@ -337,7 +330,6 @@ Class | Method | HTTP request | Description
  - [FileHashes](docs/FileHashes.md)
  - [FileMetadata](docs/FileMetadata.md)
  - [Filters](docs/Filters.md)
- - [FunctionAnalysisThreatScoreData](docs/FunctionAnalysisThreatScoreData.md)
  - [FunctionBatchAnn](docs/FunctionBatchAnn.md)
  - [FunctionBlockDestinationResponse](docs/FunctionBlockDestinationResponse.md)
  - [FunctionBlockResponse](docs/FunctionBlockResponse.md)
@@ -374,7 +366,6 @@ Class | Method | HTTP request | Description
  - [FunctionStringsResponse](docs/FunctionStringsResponse.md)
  - [FunctionTaskResponse](docs/FunctionTaskResponse.md)
  - [FunctionTaskStatus](docs/FunctionTaskStatus.md)
- - [FunctionThreatScore](docs/FunctionThreatScore.md)
  - [FunctionTypeInput](docs/FunctionTypeInput.md)
  - [FunctionTypeOutput](docs/FunctionTypeOutput.md)
  - [FunctionsDetailResponse](docs/FunctionsDetailResponse.md)
diff --git a/docs/AnalysesResultsMetadataApi.md b/docs/AnalysesResultsMetadataApi.md
index 2b3c9fa..1549ac1 100644
--- a/docs/AnalysesResultsMetadataApi.md
+++ b/docs/AnalysesResultsMetadataApi.md
@@ -10,7 +10,6 @@ Method | HTTP request | Description
 [**get_pdf**](AnalysesResultsMetadataApi.md#get_pdf) | **GET** /v2/analyses/{analysis_id}/pdf | Gets the PDF found in the analysis
 [**get_sbom**](AnalysesResultsMetadataApi.md#get_sbom) | **GET** /v2/analyses/{analysis_id}/sbom | Gets the software-bill-of-materials (SBOM) found in the analysis
 [**get_tags**](AnalysesResultsMetadataApi.md#get_tags) | **GET** /v2/analyses/{analysis_id}/tags | Get function tags with maliciousness score
-[**get_threat_score**](AnalysesResultsMetadataApi.md#get_threat_score) | **GET** /v2/analyses/{analysis_id}/threat_score | Gets the threat score found in the analysis [**get_vulnerabilities**](AnalysesResultsMetadataApi.md#get_vulnerabilities) | **GET** /v2/analyses/{analysis_id}/vulnerabilities | Gets the vulnerabilities found in the analysis @@ -503,86 +502,6 @@ Name | Type | Description | Notes [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) -# **get_threat_score** -> BaseResponseAnalysisThreatScoreData get_threat_score(analysis_id, authorization=authorization) - -Gets the threat score found in the analysis - -### Example - -* Api Key Authentication (APIKey): - -```python -import revengai -from revengai.models.base_response_analysis_threat_score_data import BaseResponseAnalysisThreatScoreData -from revengai.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.reveng.ai -# See configuration.py for a list of all supported configuration parameters. -configuration = revengai.Configuration( - host = "https://api.reveng.ai" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure API key authorization: APIKey -configuration.api_key['APIKey'] = os.environ["API_KEY"] - -# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed -# configuration.api_key_prefix['APIKey'] = 'Bearer' - -# Enter a context with an instance of the API client -with revengai.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = revengai.AnalysesResultsMetadataApi(api_client) - analysis_id = 56 # int | - authorization = 'authorization_example' # str | API Key bearer token (optional) - - try: - # Gets the threat score found in the analysis - api_response = api_instance.get_threat_score(analysis_id, authorization=authorization) - print("The response of AnalysesResultsMetadataApi->get_threat_score:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling AnalysesResultsMetadataApi->get_threat_score: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **analysis_id** | **int**| | - **authorization** | **str**| API Key bearer token | [optional] - -### Return type - -[**BaseResponseAnalysisThreatScoreData**](BaseResponseAnalysisThreatScoreData.md) - -### Authorization - -[APIKey](../README.md#APIKey) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | Successful Response | - | -**422** | Invalid request parameters | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - # **get_vulnerabilities** > BaseResponseVulnerabilities get_vulnerabilities(analysis_id, authorization=authorization) diff --git a/docs/AnalysisThreatScoreData.md b/docs/AnalysisThreatScoreData.md deleted file mode 100644 index 1e1bc99..0000000 --- a/docs/AnalysisThreatScoreData.md +++ 
/dev/null @@ -1,35 +0,0 @@ -# AnalysisThreatScoreData - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**min** | **float** | The minimum value for the analysis score | -**max** | **float** | The maximum value for the analysis score | -**average** | **float** | The average value for the analysis score | -**upper** | **float** | The upper limit for the analysis score | -**lower** | **float** | The lower limit for the analysis score | -**malware_count** | **int** | Number of malware binaries used in threat score calculation | -**benign_count** | **int** | Number of benign binaries used in threat score calculation | - -## Example - -```python -from revengai.models.analysis_threat_score_data import AnalysisThreatScoreData - -# TODO update the JSON string below -json = "{}" -# create an instance of AnalysisThreatScoreData from a JSON string -analysis_threat_score_data_instance = AnalysisThreatScoreData.from_json(json) -# print the JSON string representation of the object -print(AnalysisThreatScoreData.to_json()) - -# convert the object into a dict -analysis_threat_score_data_dict = analysis_threat_score_data_instance.to_dict() -# create an instance of AnalysisThreatScoreData from a dict -analysis_threat_score_data_from_dict = AnalysisThreatScoreData.from_dict(analysis_threat_score_data_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) - - diff --git a/docs/BaseResponseAnalysisThreatScoreData.md b/docs/BaseResponseAnalysisThreatScoreData.md deleted file mode 100644 index c613841..0000000 --- a/docs/BaseResponseAnalysisThreatScoreData.md +++ /dev/null @@ -1,33 +0,0 @@ -# BaseResponseAnalysisThreatScoreData - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**status** | **bool** | Response status on whether the request succeeded | [optional] [default to True] -**data** | [**AnalysisThreatScoreData**](AnalysisThreatScoreData.md) | | [optional] -**message** | **str** | | [optional] -**errors** | [**List[ErrorModel]**](ErrorModel.md) | | [optional] -**meta** | [**MetaModel**](MetaModel.md) | Metadata | [optional] - -## Example - -```python -from revengai.models.base_response_analysis_threat_score_data import BaseResponseAnalysisThreatScoreData - -# TODO update the JSON string below -json = "{}" -# create an instance of BaseResponseAnalysisThreatScoreData from a JSON string -base_response_analysis_threat_score_data_instance = BaseResponseAnalysisThreatScoreData.from_json(json) -# print the JSON string representation of the object -print(BaseResponseAnalysisThreatScoreData.to_json()) - -# convert the object into a dict -base_response_analysis_threat_score_data_dict = base_response_analysis_threat_score_data_instance.to_dict() -# create an instance of BaseResponseAnalysisThreatScoreData from a dict -base_response_analysis_threat_score_data_from_dict = BaseResponseAnalysisThreatScoreData.from_dict(base_response_analysis_threat_score_data_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) - - diff --git a/docs/BaseResponseFunctionAnalysisThreatScoreData.md b/docs/BaseResponseFunctionAnalysisThreatScoreData.md deleted file mode 100644 index be95889..0000000 --- a/docs/BaseResponseFunctionAnalysisThreatScoreData.md +++ /dev/null 
@@ -1,33 +0,0 @@ -# BaseResponseFunctionAnalysisThreatScoreData - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**status** | **bool** | Response status on whether the request succeeded | [optional] [default to True] -**data** | [**FunctionAnalysisThreatScoreData**](FunctionAnalysisThreatScoreData.md) | | [optional] -**message** | **str** | | [optional] -**errors** | [**List[ErrorModel]**](ErrorModel.md) | | [optional] -**meta** | [**MetaModel**](MetaModel.md) | Metadata | [optional] - -## Example - -```python -from revengai.models.base_response_function_analysis_threat_score_data import BaseResponseFunctionAnalysisThreatScoreData - -# TODO update the JSON string below -json = "{}" -# create an instance of BaseResponseFunctionAnalysisThreatScoreData from a JSON string -base_response_function_analysis_threat_score_data_instance = BaseResponseFunctionAnalysisThreatScoreData.from_json(json) -# print the JSON string representation of the object -print(BaseResponseFunctionAnalysisThreatScoreData.to_json()) - -# convert the object into a dict -base_response_function_analysis_threat_score_data_dict = base_response_function_analysis_threat_score_data_instance.to_dict() -# create an instance of BaseResponseFunctionAnalysisThreatScoreData from a dict -base_response_function_analysis_threat_score_data_from_dict = BaseResponseFunctionAnalysisThreatScoreData.from_dict(base_response_function_analysis_threat_score_data_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) - - diff --git a/docs/BaseResponseFunctionThreatScore.md b/docs/BaseResponseFunctionThreatScore.md deleted file mode 100644 index 1c4ee34..0000000 --- a/docs/BaseResponseFunctionThreatScore.md +++ /dev/null @@ -1,33 +0,0 @@ -# BaseResponseFunctionThreatScore - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**status** | **bool** | Response status on whether the request succeeded | [optional] [default to True] -**data** | [**FunctionThreatScore**](FunctionThreatScore.md) | | [optional] -**message** | **str** | | [optional] -**errors** | [**List[ErrorModel]**](ErrorModel.md) | | [optional] -**meta** | [**MetaModel**](MetaModel.md) | Metadata | [optional] - -## Example - -```python -from revengai.models.base_response_function_threat_score import BaseResponseFunctionThreatScore - -# TODO update the JSON string below -json = "{}" -# create an instance of BaseResponseFunctionThreatScore from a JSON string -base_response_function_threat_score_instance = BaseResponseFunctionThreatScore.from_json(json) -# print the JSON string representation of the object -print(BaseResponseFunctionThreatScore.to_json()) - -# convert the object into a dict -base_response_function_threat_score_dict = base_response_function_threat_score_instance.to_dict() -# create an instance of BaseResponseFunctionThreatScore from a dict -base_response_function_threat_score_from_dict = BaseResponseFunctionThreatScore.from_dict(base_response_function_threat_score_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) - - diff --git a/docs/FunctionAnalysisThreatScoreData.md b/docs/FunctionAnalysisThreatScoreData.md deleted file mode 100644 index 165b4d4..0000000 --- a/docs/FunctionAnalysisThreatScoreData.md 
+++ /dev/null @@ -1,35 +0,0 @@ -# FunctionAnalysisThreatScoreData - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**min** | **float** | The minimum value for the analysis score | -**max** | **float** | The maximum value for the analysis score | -**average** | **float** | The average value for the analysis score | -**upper** | **float** | The upper limit for the analysis score | -**lower** | **float** | The lower limit for the analysis score | -**malware_count** | **int** | Number of malware binaries used in threat score calculation | -**benign_count** | **int** | Number of benign binaries used in threat score calculation | - -## Example - -```python -from revengai.models.function_analysis_threat_score_data import FunctionAnalysisThreatScoreData - -# TODO update the JSON string below -json = "{}" -# create an instance of FunctionAnalysisThreatScoreData from a JSON string -function_analysis_threat_score_data_instance = FunctionAnalysisThreatScoreData.from_json(json) -# print the JSON string representation of the object -print(FunctionAnalysisThreatScoreData.to_json()) - -# convert the object into a dict -function_analysis_threat_score_data_dict = function_analysis_threat_score_data_instance.to_dict() -# create an instance of FunctionAnalysisThreatScoreData from a dict -function_analysis_threat_score_data_from_dict = FunctionAnalysisThreatScoreData.from_dict(function_analysis_threat_score_data_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) - - diff --git a/docs/FunctionThreatScore.md b/docs/FunctionThreatScore.md deleted file mode 100644 index add105e..0000000 --- a/docs/FunctionThreatScore.md +++ /dev/null @@ -1,29 +0,0 @@ -# FunctionThreatScore - - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**results** | [**Dict[str, FunctionAnalysisThreatScoreData]**](FunctionAnalysisThreatScoreData.md) | The results of the function threat | - -## Example - -```python -from revengai.models.function_threat_score import FunctionThreatScore - -# TODO update the JSON string below -json = "{}" -# create an instance of FunctionThreatScore from a JSON string -function_threat_score_instance = FunctionThreatScore.from_json(json) -# print the JSON string representation of the object -print(FunctionThreatScore.to_json()) - -# convert the object into a dict -function_threat_score_dict = function_threat_score_instance.to_dict() -# create an instance of FunctionThreatScore from a dict -function_threat_score_from_dict = FunctionThreatScore.from_dict(function_threat_score_dict) -``` -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) - - diff --git a/docs/FunctionsThreatScoreApi.md b/docs/FunctionsThreatScoreApi.md deleted file mode 100644 index 4d93e58..0000000 --- a/docs/FunctionsThreatScoreApi.md +++ /dev/null @@ -1,174 +0,0 @@ -# revengai.FunctionsThreatScoreApi - -All URIs are relative to *https://api.reveng.ai* - -Method | HTTP request | Description -------------- | ------------- | ------------- -[**get_all_function_threat_scores**](FunctionsThreatScoreApi.md#get_all_function_threat_scores) | **GET** /v2/analyses/{analysis_id}/functions/threat_score | Gets the threat score for all functions 
-[**get_individual_function_threat_score**](FunctionsThreatScoreApi.md#get_individual_function_threat_score) | **GET** /v2/analyses/{analysis_id}/functions/{function_id}/threat_score | Gets the threat score analysis - - -# **get_all_function_threat_scores** -> BaseResponseFunctionThreatScore get_all_function_threat_scores(analysis_id, authorization=authorization) - -Gets the threat score for all functions - -Calculates the threat score for all functions inside of an analysis - -### Example - -* Api Key Authentication (APIKey): - -```python -import revengai -from revengai.models.base_response_function_threat_score import BaseResponseFunctionThreatScore -from revengai.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to https://api.reveng.ai -# See configuration.py for a list of all supported configuration parameters. -configuration = revengai.Configuration( - host = "https://api.reveng.ai" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure API key authorization: APIKey -configuration.api_key['APIKey'] = os.environ["API_KEY"] - -# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed -# configuration.api_key_prefix['APIKey'] = 'Bearer' - -# Enter a context with an instance of the API client -with revengai.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = revengai.FunctionsThreatScoreApi(api_client) - analysis_id = 56 # int | - authorization = 'authorization_example' # str | API Key bearer token (optional) - - try: - # Gets the threat score for all functions - api_response = api_instance.get_all_function_threat_scores(analysis_id, authorization=authorization) - print("The response of FunctionsThreatScoreApi->get_all_function_threat_scores:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling FunctionsThreatScoreApi->get_all_function_threat_scores: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **analysis_id** | **int**| | - **authorization** | **str**| API Key bearer token | [optional] - -### Return type - -[**BaseResponseFunctionThreatScore**](BaseResponseFunctionThreatScore.md) - -### Authorization - -[APIKey](../README.md#APIKey) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | Successful Response | - | -**422** | Invalid request parameters | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - -# **get_individual_function_threat_score** -> BaseResponseFunctionAnalysisThreatScoreData get_individual_function_threat_score(analysis_id, function_id, authorization=authorization) - -Gets the threat score analysis - -### Example - -* Api Key Authentication (APIKey): - -```python -import revengai -from revengai.models.base_response_function_analysis_threat_score_data import BaseResponseFunctionAnalysisThreatScoreData -from revengai.rest import ApiException -from pprint import pprint - -# Defining the host is optional and defaults to 
https://api.reveng.ai -# See configuration.py for a list of all supported configuration parameters. -configuration = revengai.Configuration( - host = "https://api.reveng.ai" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure API key authorization: APIKey -configuration.api_key['APIKey'] = os.environ["API_KEY"] - -# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed -# configuration.api_key_prefix['APIKey'] = 'Bearer' - -# Enter a context with an instance of the API client -with revengai.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = revengai.FunctionsThreatScoreApi(api_client) - analysis_id = 56 # int | - function_id = 56 # int | - authorization = 'authorization_example' # str | API Key bearer token (optional) - - try: - # Gets the threat score analysis - api_response = api_instance.get_individual_function_threat_score(analysis_id, function_id, authorization=authorization) - print("The response of FunctionsThreatScoreApi->get_individual_function_threat_score:\n") - pprint(api_response) - except Exception as e: - print("Exception when calling FunctionsThreatScoreApi->get_individual_function_threat_score: %s\n" % e) -``` - - - -### Parameters - - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **analysis_id** | **int**| | - **function_id** | **int**| | - **authorization** | **str**| API Key bearer token | [optional] - -### Return type - -[**BaseResponseFunctionAnalysisThreatScoreData**](BaseResponseFunctionAnalysisThreatScoreData.md) - -### Authorization - -[APIKey](../README.md#APIKey) - -### HTTP request headers - - - **Content-Type**: Not defined - - **Accept**: application/json - -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**200** | Successful Response | - | -**422** | Invalid request parameters | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - diff --git a/revengai/__init__.py b/revengai/__init__.py index 1b04094..ebe7dbd 100644 --- a/revengai/__init__.py +++ b/revengai/__init__.py @@ -13,7 +13,7 @@ """ # noqa: E501 -__version__ = "v1.89.4" +__version__ = "v1.91.1" # Define package exports __all__ = [ @@ -34,7 +34,6 @@ "FunctionsDataTypesApi", "FunctionsDecompilationApi", "FunctionsRenamingHistoryApi", - "FunctionsThreatScoreApi", "ModelsApi", "SearchApi", "ApiResponse", @@ -61,7 +60,6 @@ "AnalysisScope", "AnalysisStringsResponse", "AnalysisTags", - "AnalysisThreatScoreData", "AnalysisUpdateRequest", "AnalysisUpdateTagsRequest", "AnalysisUpdateTagsResponse", @@ -85,7 +83,6 @@ "BaseResponseAnalysisFunctions", "BaseResponseAnalysisStringsResponse", "BaseResponseAnalysisTags", - "BaseResponseAnalysisThreatScoreData", "BaseResponseAnalysisUpdateTagsResponse", "BaseResponseBasic", "BaseResponseBinaryAdditionalResponse", @@ -112,7 +109,6 @@ "BaseResponseDict", "BaseResponseDynamicExecutionStatus", "BaseResponseExternalResponse", - "BaseResponseFunctionAnalysisThreatScoreData", "BaseResponseFunctionBlocksResponse", "BaseResponseFunctionCapabilityResponse", "BaseResponseFunctionDataTypes", @@ -120,7 +116,6 @@ "BaseResponseFunctionSearchResponse", 
"BaseResponseFunctionStringsResponse", "BaseResponseFunctionTaskResponse", - "BaseResponseFunctionThreatScore", "BaseResponseFunctionsDetailResponse", "BaseResponseGenerateFunctionDataTypes", "BaseResponseGenerationStatusList", @@ -221,7 +216,6 @@ "FileHashes", "FileMetadata", "Filters", - "FunctionAnalysisThreatScoreData", "FunctionBatchAnn", "FunctionBlockDestinationResponse", "FunctionBlockResponse", @@ -258,7 +252,6 @@ "FunctionStringsResponse", "FunctionTaskResponse", "FunctionTaskStatus", - "FunctionThreatScore", "FunctionTypeInput", "FunctionTypeOutput", "FunctionsDetailResponse", @@ -379,7 +372,6 @@ from revengai.api.functions_data_types_api import FunctionsDataTypesApi as FunctionsDataTypesApi from revengai.api.functions_decompilation_api import FunctionsDecompilationApi as FunctionsDecompilationApi from revengai.api.functions_renaming_history_api import FunctionsRenamingHistoryApi as FunctionsRenamingHistoryApi -from revengai.api.functions_threat_score_api import FunctionsThreatScoreApi as FunctionsThreatScoreApi from revengai.api.models_api import ModelsApi as ModelsApi from revengai.api.search_api import SearchApi as SearchApi @@ -410,7 +402,6 @@ from revengai.models.analysis_scope import AnalysisScope as AnalysisScope from revengai.models.analysis_strings_response import AnalysisStringsResponse as AnalysisStringsResponse from revengai.models.analysis_tags import AnalysisTags as AnalysisTags -from revengai.models.analysis_threat_score_data import AnalysisThreatScoreData as AnalysisThreatScoreData from revengai.models.analysis_update_request import AnalysisUpdateRequest as AnalysisUpdateRequest from revengai.models.analysis_update_tags_request import AnalysisUpdateTagsRequest as AnalysisUpdateTagsRequest from revengai.models.analysis_update_tags_response import AnalysisUpdateTagsResponse as AnalysisUpdateTagsResponse @@ -434,7 +425,6 @@ from revengai.models.base_response_analysis_functions import BaseResponseAnalysisFunctions as BaseResponseAnalysisFunctions from revengai.models.base_response_analysis_strings_response import BaseResponseAnalysisStringsResponse as BaseResponseAnalysisStringsResponse from revengai.models.base_response_analysis_tags import BaseResponseAnalysisTags as BaseResponseAnalysisTags -from revengai.models.base_response_analysis_threat_score_data import BaseResponseAnalysisThreatScoreData as BaseResponseAnalysisThreatScoreData from revengai.models.base_response_analysis_update_tags_response import BaseResponseAnalysisUpdateTagsResponse as BaseResponseAnalysisUpdateTagsResponse from revengai.models.base_response_basic import BaseResponseBasic as BaseResponseBasic from revengai.models.base_response_binary_additional_response import BaseResponseBinaryAdditionalResponse as BaseResponseBinaryAdditionalResponse @@ -461,7 +451,6 @@ from revengai.models.base_response_dict import BaseResponseDict as BaseResponseDict from revengai.models.base_response_dynamic_execution_status import BaseResponseDynamicExecutionStatus as BaseResponseDynamicExecutionStatus from revengai.models.base_response_external_response import BaseResponseExternalResponse as BaseResponseExternalResponse -from revengai.models.base_response_function_analysis_threat_score_data import BaseResponseFunctionAnalysisThreatScoreData as BaseResponseFunctionAnalysisThreatScoreData from revengai.models.base_response_function_blocks_response import BaseResponseFunctionBlocksResponse as BaseResponseFunctionBlocksResponse from revengai.models.base_response_function_capability_response import 
BaseResponseFunctionCapabilityResponse as BaseResponseFunctionCapabilityResponse from revengai.models.base_response_function_data_types import BaseResponseFunctionDataTypes as BaseResponseFunctionDataTypes @@ -469,7 +458,6 @@ from revengai.models.base_response_function_search_response import BaseResponseFunctionSearchResponse as BaseResponseFunctionSearchResponse from revengai.models.base_response_function_strings_response import BaseResponseFunctionStringsResponse as BaseResponseFunctionStringsResponse from revengai.models.base_response_function_task_response import BaseResponseFunctionTaskResponse as BaseResponseFunctionTaskResponse -from revengai.models.base_response_function_threat_score import BaseResponseFunctionThreatScore as BaseResponseFunctionThreatScore from revengai.models.base_response_functions_detail_response import BaseResponseFunctionsDetailResponse as BaseResponseFunctionsDetailResponse from revengai.models.base_response_generate_function_data_types import BaseResponseGenerateFunctionDataTypes as BaseResponseGenerateFunctionDataTypes from revengai.models.base_response_generation_status_list import BaseResponseGenerationStatusList as BaseResponseGenerationStatusList @@ -570,7 +558,6 @@ from revengai.models.file_hashes import FileHashes as FileHashes from revengai.models.file_metadata import FileMetadata as FileMetadata from revengai.models.filters import Filters as Filters -from revengai.models.function_analysis_threat_score_data import FunctionAnalysisThreatScoreData as FunctionAnalysisThreatScoreData from revengai.models.function_batch_ann import FunctionBatchAnn as FunctionBatchAnn from revengai.models.function_block_destination_response import FunctionBlockDestinationResponse as FunctionBlockDestinationResponse from revengai.models.function_block_response import FunctionBlockResponse as FunctionBlockResponse @@ -607,7 +594,6 @@ from revengai.models.function_strings_response import FunctionStringsResponse as FunctionStringsResponse from revengai.models.function_task_response import FunctionTaskResponse as FunctionTaskResponse from revengai.models.function_task_status import FunctionTaskStatus as FunctionTaskStatus -from revengai.models.function_threat_score import FunctionThreatScore as FunctionThreatScore from revengai.models.function_type_input import FunctionTypeInput as FunctionTypeInput from revengai.models.function_type_output import FunctionTypeOutput as FunctionTypeOutput from revengai.models.functions_detail_response import FunctionsDetailResponse as FunctionsDetailResponse diff --git a/revengai/api/__init__.py b/revengai/api/__init__.py index 0199858..e4e7c87 100644 --- a/revengai/api/__init__.py +++ b/revengai/api/__init__.py @@ -18,7 +18,6 @@ from revengai.api.functions_data_types_api import FunctionsDataTypesApi from revengai.api.functions_decompilation_api import FunctionsDecompilationApi from revengai.api.functions_renaming_history_api import FunctionsRenamingHistoryApi -from revengai.api.functions_threat_score_api import FunctionsThreatScoreApi from revengai.api.models_api import ModelsApi from revengai.api.search_api import SearchApi diff --git a/revengai/api/analyses_results_metadata_api.py b/revengai/api/analyses_results_metadata_api.py index 08d0d58..c5f09ae 100644 --- a/revengai/api/analyses_results_metadata_api.py +++ b/revengai/api/analyses_results_metadata_api.py @@ -20,7 +20,6 @@ from typing_extensions import Annotated from revengai.models.base_response_analysis_functions import BaseResponseAnalysisFunctions from 
revengai.models.base_response_analysis_tags import BaseResponseAnalysisTags -from revengai.models.base_response_analysis_threat_score_data import BaseResponseAnalysisThreatScoreData from revengai.models.base_response_capabilities import BaseResponseCapabilities from revengai.models.base_response_communities import BaseResponseCommunities from revengai.models.base_response_list_sbom import BaseResponseListSBOM @@ -1771,282 +1770,6 @@ def _get_tags_serialize( - @validate_call - def get_threat_score( - self, - analysis_id: StrictInt, - authorization: Annotated[Optional[StrictStr], Field(description="API Key bearer token")] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], - Annotated[StrictFloat, Field(gt=0)] - ] - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> BaseResponseAnalysisThreatScoreData: - """Gets the threat score found in the analysis - - - :param analysis_id: (required) - :type analysis_id: int - :param authorization: API Key bearer token - :type authorization: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_threat_score_serialize( - analysis_id=analysis_id, - authorization=authorization, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - '200': "BaseResponseAnalysisThreatScoreData", - '422': "BaseResponse", - } - response_data = self.api_client.call_api( - *_param, - _request_timeout=_request_timeout - ) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - - @validate_call - def get_threat_score_with_http_info( - self, - analysis_id: StrictInt, - authorization: Annotated[Optional[StrictStr], Field(description="API Key bearer token")] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], - Annotated[StrictFloat, Field(gt=0)] - ] - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[BaseResponseAnalysisThreatScoreData]: - """Gets the threat score found in the analysis - - - :param analysis_id: (required) - :type analysis_id: int - :param authorization: API Key bearer token - :type authorization: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_threat_score_serialize( - analysis_id=analysis_id, - authorization=authorization, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - '200': "BaseResponseAnalysisThreatScoreData", - '422': "BaseResponse", - } - response_data = self.api_client.call_api( - *_param, - _request_timeout=_request_timeout - ) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - - @validate_call - def get_threat_score_without_preload_content( - self, - analysis_id: StrictInt, - authorization: Annotated[Optional[StrictStr], Field(description="API Key bearer token")] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], - Annotated[StrictFloat, Field(gt=0)] - ] - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Gets the threat score found in the analysis - - - :param analysis_id: (required) - :type analysis_id: int - :param authorization: API Key bearer token - :type authorization: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_threat_score_serialize( - analysis_id=analysis_id, - authorization=authorization, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - '200': "BaseResponseAnalysisThreatScoreData", - '422': "BaseResponse", - } - response_data = self.api_client.call_api( - *_param, - _request_timeout=_request_timeout - ) - return response_data.response - - - def _get_threat_score_serialize( - self, - analysis_id, - authorization, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = { - } - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if analysis_id is not None: - _path_params['analysis_id'] = analysis_id - # process the query parameters - # process the header parameters - if authorization is not None: - _header_params['authorization'] = authorization - # process the form parameters - # process the body parameter - - - # set the HTTP header `Accept` - if 'Accept' not in _header_params: - _header_params['Accept'] = self.api_client.select_header_accept( - [ - 'application/json' - ] - ) - - - # authentication setting - _auth_settings: List[str] = [ - 'APIKey' - ] - - return self.api_client.param_serialize( - method='GET', - resource_path='/v2/analyses/{analysis_id}/threat_score', - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth - ) - - - - @validate_call def get_vulnerabilities( self, diff --git a/revengai/api/functions_threat_score_api.py b/revengai/api/functions_threat_score_api.py deleted file mode 100644 index 9b0cb93..0000000 --- a/revengai/api/functions_threat_score_api.py +++ /dev/null @@ -1,609 +0,0 @@ -# coding: utf-8 - -""" - RevEng.AI API - - RevEng.AI is Similarity Search Engine for executable binaries - - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - -import warnings -from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt -from typing import Any, Dict, List, Optional, Tuple, Union -from typing_extensions import Annotated - -from pydantic import Field, StrictInt, StrictStr -from typing import Optional -from typing_extensions import Annotated -from revengai.models.base_response_function_analysis_threat_score_data import BaseResponseFunctionAnalysisThreatScoreData -from revengai.models.base_response_function_threat_score import BaseResponseFunctionThreatScore - -from revengai.api_client import ApiClient, RequestSerialized -from revengai.api_response import ApiResponse -from revengai.rest import RESTResponseType - - -class FunctionsThreatScoreApi: - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - def __init__(self, api_client=None) -> None: - if api_client is None: - api_client = ApiClient.get_default() - self.api_client = api_client - - - @validate_call - def get_all_function_threat_scores( - self, - analysis_id: StrictInt, - authorization: Annotated[Optional[StrictStr], Field(description="API Key bearer token")] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], - Annotated[StrictFloat, Field(gt=0)] - ] - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> BaseResponseFunctionThreatScore: - """Gets the threat score for all functions - - Calculates the threat score for all functions inside of an analysis - - :param analysis_id: (required) - :type analysis_id: int - :param authorization: API Key bearer token - :type authorization: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_all_function_threat_scores_serialize( - analysis_id=analysis_id, - authorization=authorization, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - '200': "BaseResponseFunctionThreatScore", - '422': "BaseResponse", - } - response_data = self.api_client.call_api( - *_param, - _request_timeout=_request_timeout - ) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - - @validate_call - def get_all_function_threat_scores_with_http_info( - self, - analysis_id: StrictInt, - authorization: Annotated[Optional[StrictStr], Field(description="API Key bearer token")] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], - Annotated[StrictFloat, Field(gt=0)] - ] - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[BaseResponseFunctionThreatScore]: - """Gets the threat score for all functions - - Calculates the threat score for all functions inside of an analysis - - :param analysis_id: (required) - :type analysis_id: int - :param authorization: API Key bearer token - :type authorization: str - :param _request_timeout: timeout setting for this request. 
If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_all_function_threat_scores_serialize( - analysis_id=analysis_id, - authorization=authorization, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - '200': "BaseResponseFunctionThreatScore", - '422': "BaseResponse", - } - response_data = self.api_client.call_api( - *_param, - _request_timeout=_request_timeout - ) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - - @validate_call - def get_all_function_threat_scores_without_preload_content( - self, - analysis_id: StrictInt, - authorization: Annotated[Optional[StrictStr], Field(description="API Key bearer token")] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], - Annotated[StrictFloat, Field(gt=0)] - ] - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Gets the threat score for all functions - - Calculates the threat score for all functions inside of an analysis - - :param analysis_id: (required) - :type analysis_id: int - :param authorization: API Key bearer token - :type authorization: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_all_function_threat_scores_serialize( - analysis_id=analysis_id, - authorization=authorization, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - '200': "BaseResponseFunctionThreatScore", - '422': "BaseResponse", - } - response_data = self.api_client.call_api( - *_param, - _request_timeout=_request_timeout - ) - return response_data.response - - - def _get_all_function_threat_scores_serialize( - self, - analysis_id, - authorization, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = { - } - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if analysis_id is not None: - _path_params['analysis_id'] = analysis_id - # process the query parameters - # process the header parameters - if authorization is not None: - _header_params['authorization'] = authorization - # process the form parameters - # process the body parameter - - - # set the HTTP header `Accept` - if 'Accept' not in _header_params: - _header_params['Accept'] = self.api_client.select_header_accept( - [ - 'application/json' - ] - ) - - - # authentication setting - _auth_settings: List[str] = [ - 'APIKey' - ] - - return self.api_client.param_serialize( - method='GET', - resource_path='/v2/analyses/{analysis_id}/functions/threat_score', - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth - ) - - - - - @validate_call - def get_individual_function_threat_score( - self, - analysis_id: StrictInt, - function_id: StrictInt, - authorization: Annotated[Optional[StrictStr], Field(description="API Key bearer token")] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], - Annotated[StrictFloat, Field(gt=0)] - ] - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> BaseResponseFunctionAnalysisThreatScoreData: - """Gets the threat score analysis - - - :param analysis_id: (required) - :type analysis_id: int - :param function_id: (required) - :type function_id: int - :param authorization: API Key bearer token - :type authorization: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. 
- :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. - """ # noqa: E501 - - _param = self._get_individual_function_threat_score_serialize( - analysis_id=analysis_id, - function_id=function_id, - authorization=authorization, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - '200': "BaseResponseFunctionAnalysisThreatScoreData", - '422': "BaseResponse", - } - response_data = self.api_client.call_api( - *_param, - _request_timeout=_request_timeout - ) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ).data - - - @validate_call - def get_individual_function_threat_score_with_http_info( - self, - analysis_id: StrictInt, - function_id: StrictInt, - authorization: Annotated[Optional[StrictStr], Field(description="API Key bearer token")] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], - Annotated[StrictFloat, Field(gt=0)] - ] - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> ApiResponse[BaseResponseFunctionAnalysisThreatScoreData]: - """Gets the threat score analysis - - - :param analysis_id: (required) - :type analysis_id: int - :param function_id: (required) - :type function_id: int - :param authorization: API Key bearer token - :type authorization: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_individual_function_threat_score_serialize( - analysis_id=analysis_id, - function_id=function_id, - authorization=authorization, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - '200': "BaseResponseFunctionAnalysisThreatScoreData", - '422': "BaseResponse", - } - response_data = self.api_client.call_api( - *_param, - _request_timeout=_request_timeout - ) - response_data.read() - return self.api_client.response_deserialize( - response_data=response_data, - response_types_map=_response_types_map, - ) - - - @validate_call - def get_individual_function_threat_score_without_preload_content( - self, - analysis_id: StrictInt, - function_id: StrictInt, - authorization: Annotated[Optional[StrictStr], Field(description="API Key bearer token")] = None, - _request_timeout: Union[ - None, - Annotated[StrictFloat, Field(gt=0)], - Tuple[ - Annotated[StrictFloat, Field(gt=0)], - Annotated[StrictFloat, Field(gt=0)] - ] - ] = None, - _request_auth: Optional[Dict[StrictStr, Any]] = None, - _content_type: Optional[StrictStr] = None, - _headers: Optional[Dict[StrictStr, Any]] = None, - _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, - ) -> RESTResponseType: - """Gets the threat score analysis - - - :param analysis_id: (required) - :type analysis_id: int - :param function_id: (required) - :type function_id: int - :param authorization: API Key bearer token - :type authorization: str - :param _request_timeout: timeout setting for this request. If one - number provided, it will be total request - timeout. It can also be a pair (tuple) of - (connection, read) timeouts. - :type _request_timeout: int, tuple(int, int), optional - :param _request_auth: set to override the auth_settings for an a single - request; this effectively ignores the - authentication in the spec for a single request. - :type _request_auth: dict, optional - :param _content_type: force content-type for the request. - :type _content_type: str, Optional - :param _headers: set to override the headers for a single - request; this effectively ignores the headers - in the spec for a single request. - :type _headers: dict, optional - :param _host_index: set to override the host_index for a single - request; this effectively ignores the host_index - in the spec for a single request. - :type _host_index: int, optional - :return: Returns the result object. 
- """ # noqa: E501 - - _param = self._get_individual_function_threat_score_serialize( - analysis_id=analysis_id, - function_id=function_id, - authorization=authorization, - _request_auth=_request_auth, - _content_type=_content_type, - _headers=_headers, - _host_index=_host_index - ) - - _response_types_map: Dict[str, Optional[str]] = { - '200': "BaseResponseFunctionAnalysisThreatScoreData", - '422': "BaseResponse", - } - response_data = self.api_client.call_api( - *_param, - _request_timeout=_request_timeout - ) - return response_data.response - - - def _get_individual_function_threat_score_serialize( - self, - analysis_id, - function_id, - authorization, - _request_auth, - _content_type, - _headers, - _host_index, - ) -> RequestSerialized: - - _host = None - - _collection_formats: Dict[str, str] = { - } - - _path_params: Dict[str, str] = {} - _query_params: List[Tuple[str, str]] = [] - _header_params: Dict[str, Optional[str]] = _headers or {} - _form_params: List[Tuple[str, str]] = [] - _files: Dict[ - str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] - ] = {} - _body_params: Optional[bytes] = None - - # process the path parameters - if analysis_id is not None: - _path_params['analysis_id'] = analysis_id - if function_id is not None: - _path_params['function_id'] = function_id - # process the query parameters - # process the header parameters - if authorization is not None: - _header_params['authorization'] = authorization - # process the form parameters - # process the body parameter - - - # set the HTTP header `Accept` - if 'Accept' not in _header_params: - _header_params['Accept'] = self.api_client.select_header_accept( - [ - 'application/json' - ] - ) - - - # authentication setting - _auth_settings: List[str] = [ - 'APIKey' - ] - - return self.api_client.param_serialize( - method='GET', - resource_path='/v2/analyses/{analysis_id}/functions/{function_id}/threat_score', - path_params=_path_params, - query_params=_query_params, - header_params=_header_params, - body=_body_params, - post_params=_form_params, - files=_files, - auth_settings=_auth_settings, - collection_formats=_collection_formats, - _host=_host, - _request_auth=_request_auth - ) - - diff --git a/revengai/api_client.py b/revengai/api_client.py index 979eb67..c09ec3e 100644 --- a/revengai/api_client.py +++ b/revengai/api_client.py @@ -90,7 +90,7 @@ def __init__( self.default_headers[header_name] = header_value self.cookie = cookie # Set default User-Agent. 
-        self.user_agent = 'OpenAPI-Generator/v1.89.4/python'
+        self.user_agent = 'OpenAPI-Generator/v1.91.1/python'
         self.client_side_validation = configuration.client_side_validation
 
     def __enter__(self):
diff --git a/revengai/configuration.py b/revengai/configuration.py
index 2c98fb9..a6b0a07 100644
--- a/revengai/configuration.py
+++ b/revengai/configuration.py
@@ -529,8 +529,8 @@ def to_debug_report(self) -> str:
         return "Python SDK Debug Report:\n"\
                "OS: {env}\n"\
                "Python Version: {pyversion}\n"\
-               "Version of the API: v1.89.4\n"\
-               "SDK Package Version: v1.89.4".\
+               "Version of the API: v1.91.1\n"\
+               "SDK Package Version: v1.91.1".\
                format(env=sys.platform, pyversion=sys.version)
 
     def get_host_settings(self) -> List[HostSetting]:
diff --git a/revengai/models/__init__.py b/revengai/models/__init__.py
index 71d13bc..1161b1e 100644
--- a/revengai/models/__init__.py
+++ b/revengai/models/__init__.py
@@ -27,7 +26,6 @@
 from revengai.models.analysis_scope import AnalysisScope
 from revengai.models.analysis_strings_response import AnalysisStringsResponse
 from revengai.models.analysis_tags import AnalysisTags
-from revengai.models.analysis_threat_score_data import AnalysisThreatScoreData
 from revengai.models.analysis_update_request import AnalysisUpdateRequest
 from revengai.models.analysis_update_tags_request import AnalysisUpdateTagsRequest
 from revengai.models.analysis_update_tags_response import AnalysisUpdateTagsResponse
@@ -51,7 +50,6 @@
 from revengai.models.base_response_analysis_functions import BaseResponseAnalysisFunctions
 from revengai.models.base_response_analysis_strings_response import BaseResponseAnalysisStringsResponse
 from revengai.models.base_response_analysis_tags import BaseResponseAnalysisTags
-from revengai.models.base_response_analysis_threat_score_data import BaseResponseAnalysisThreatScoreData
 from revengai.models.base_response_analysis_update_tags_response import BaseResponseAnalysisUpdateTagsResponse
 from revengai.models.base_response_basic import BaseResponseBasic
 from revengai.models.base_response_binary_additional_response import BaseResponseBinaryAdditionalResponse
@@ -78,7 +76,6 @@
 from revengai.models.base_response_dict import BaseResponseDict
 from revengai.models.base_response_dynamic_execution_status import BaseResponseDynamicExecutionStatus
 from revengai.models.base_response_external_response import BaseResponseExternalResponse
-from revengai.models.base_response_function_analysis_threat_score_data import BaseResponseFunctionAnalysisThreatScoreData
 from revengai.models.base_response_function_blocks_response import BaseResponseFunctionBlocksResponse
 from revengai.models.base_response_function_capability_response import BaseResponseFunctionCapabilityResponse
 from revengai.models.base_response_function_data_types import BaseResponseFunctionDataTypes
@@ -86,7 +83,6 @@
 from revengai.models.base_response_function_search_response import BaseResponseFunctionSearchResponse
 from revengai.models.base_response_function_strings_response import BaseResponseFunctionStringsResponse
 from revengai.models.base_response_function_task_response import BaseResponseFunctionTaskResponse
-from revengai.models.base_response_function_threat_score import BaseResponseFunctionThreatScore
 from revengai.models.base_response_functions_detail_response import BaseResponseFunctionsDetailResponse
 from revengai.models.base_response_generate_function_data_types import BaseResponseGenerateFunctionDataTypes
 from revengai.models.base_response_generation_status_list import BaseResponseGenerationStatusList
@@ -187,7 +183,6 @@
 from revengai.models.file_hashes import FileHashes
 from revengai.models.file_metadata import FileMetadata
 from revengai.models.filters import Filters
-from revengai.models.function_analysis_threat_score_data import FunctionAnalysisThreatScoreData
 from revengai.models.function_batch_ann import FunctionBatchAnn
 from revengai.models.function_block_destination_response import FunctionBlockDestinationResponse
 from revengai.models.function_block_response import FunctionBlockResponse
@@ -224,7 +219,6 @@
 from revengai.models.function_strings_response import FunctionStringsResponse
 from revengai.models.function_task_response import FunctionTaskResponse
 from revengai.models.function_task_status import FunctionTaskStatus
-from revengai.models.function_threat_score import FunctionThreatScore
 from revengai.models.function_type_input import FunctionTypeInput
 from revengai.models.function_type_output import FunctionTypeOutput
 from revengai.models.functions_detail_response import FunctionsDetailResponse
diff --git a/revengai/models/analysis_threat_score_data.py b/revengai/models/analysis_threat_score_data.py
deleted file mode 100644
index 938bb89..0000000
--- a/revengai/models/analysis_threat_score_data.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# coding: utf-8
-
-"""
-    RevEng.AI API
-
-    RevEng.AI is Similarity Search Engine for executable binaries
-
-    Generated by OpenAPI Generator (https://openapi-generator.tech)
-
-    Do not edit the class manually.
-""" # noqa: E501
-
-
-from __future__ import annotations
-import pprint
-import re # noqa: F401
-import json
-
-from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt
-from typing import Any, ClassVar, Dict, List, Union
-from typing import Optional, Set
-from typing_extensions import Self
-
-class AnalysisThreatScoreData(BaseModel):
-    """
-    AnalysisThreatScoreData
-    """ # noqa: E501
-    min: Union[StrictFloat, StrictInt] = Field(description="The minimum value for the analysis score")
-    max: Union[StrictFloat, StrictInt] = Field(description="The maximum value for the analysis score")
-    average: Union[StrictFloat, StrictInt] = Field(description="The average value for the analysis score")
-    upper: Union[StrictFloat, StrictInt] = Field(description="The upper limit for the analysis score")
-    lower: Union[StrictFloat, StrictInt] = Field(description="The lower limit for the analysis score")
-    malware_count: StrictInt = Field(description="Number of malware binaries used in threat score calculation")
-    benign_count: StrictInt = Field(description="Number of benign binaries used in threat score calculation")
-    __properties: ClassVar[List[str]] = ["min", "max", "average", "upper", "lower", "malware_count", "benign_count"]
-
-    model_config = ConfigDict(
-        populate_by_name=True,
-        validate_assignment=True,
-        protected_namespaces=(),
-    )
-
-
-    def to_str(self) -> str:
-        """Returns the string representation of the model using alias"""
-        return pprint.pformat(self.model_dump(by_alias=True))
-
-    def to_json(self) -> str:
-        """Returns the JSON representation of the model using alias"""
-        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
-        return json.dumps(self.to_dict())
-
-    @classmethod
-    def from_json(cls, json_str: str) -> Optional[Self]:
-        """Create an instance of AnalysisThreatScoreData from a JSON string"""
-        return cls.from_dict(json.loads(json_str))
-
-    def to_dict(self) -> Dict[str, Any]:
-        """Return the dictionary representation of the model using alias.
- - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([ - ]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of AnalysisThreatScoreData from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({ - "min": obj.get("min"), - "max": obj.get("max"), - "average": obj.get("average"), - "upper": obj.get("upper"), - "lower": obj.get("lower"), - "malware_count": obj.get("malware_count"), - "benign_count": obj.get("benign_count") - }) - return _obj - - diff --git a/revengai/models/base_response_analysis_threat_score_data.py b/revengai/models/base_response_analysis_threat_score_data.py deleted file mode 100644 index 6df9185..0000000 --- a/revengai/models/base_response_analysis_threat_score_data.py +++ /dev/null @@ -1,125 +0,0 @@ -# coding: utf-8 - -""" - RevEng.AI API - - RevEng.AI is Similarity Search Engine for executable binaries - - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from revengai.models.analysis_threat_score_data import AnalysisThreatScoreData -from revengai.models.error_model import ErrorModel -from revengai.models.meta_model import MetaModel -from typing import Optional, Set -from typing_extensions import Self - -class BaseResponseAnalysisThreatScoreData(BaseModel): - """ - BaseResponseAnalysisThreatScoreData - """ # noqa: E501 - status: Optional[StrictBool] = Field(default=True, description="Response status on whether the request succeeded") - data: Optional[AnalysisThreatScoreData] = None - message: Optional[StrictStr] = None - errors: Optional[List[ErrorModel]] = None - meta: Optional[MetaModel] = Field(default=None, description="Metadata") - __properties: ClassVar[List[str]] = ["status", "data", "message", "errors", "meta"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of BaseResponseAnalysisThreatScoreData from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([ - ]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of data - if self.data: - _dict['data'] = self.data.to_dict() - # override the default output from pydantic by calling `to_dict()` of each item in errors (list) - _items = [] - if self.errors: - for _item_errors in self.errors: - if _item_errors: - _items.append(_item_errors.to_dict()) - _dict['errors'] = _items - # override the default output from pydantic by calling `to_dict()` of meta - if self.meta: - _dict['meta'] = self.meta.to_dict() - # set to None if data (nullable) is None - # and model_fields_set contains the field - if self.data is None and "data" in self.model_fields_set: - _dict['data'] = None - - # set to None if message (nullable) is None - # and model_fields_set contains the field - if self.message is None and "message" in self.model_fields_set: - _dict['message'] = None - - # set to None if errors (nullable) is None - # and model_fields_set contains the field - if self.errors is None and "errors" in self.model_fields_set: - _dict['errors'] = None - - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of BaseResponseAnalysisThreatScoreData from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({ - "status": obj.get("status") if obj.get("status") is not None else True, - "data": AnalysisThreatScoreData.from_dict(obj["data"]) if obj.get("data") is not None else None, - "message": obj.get("message"), - "errors": [ErrorModel.from_dict(_item) for _item in obj["errors"]] if obj.get("errors") is not None else None, - "meta": MetaModel.from_dict(obj["meta"]) if obj.get("meta") is not None else None - }) - return _obj - - diff --git a/revengai/models/base_response_function_analysis_threat_score_data.py b/revengai/models/base_response_function_analysis_threat_score_data.py deleted file mode 100644 index 8698ede..0000000 --- a/revengai/models/base_response_function_analysis_threat_score_data.py +++ /dev/null @@ -1,125 +0,0 @@ -# coding: utf-8 - -""" - RevEng.AI API - - RevEng.AI is Similarity Search Engine for executable binaries - - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from revengai.models.error_model import ErrorModel -from revengai.models.function_analysis_threat_score_data import FunctionAnalysisThreatScoreData -from revengai.models.meta_model import MetaModel -from typing import Optional, Set -from typing_extensions import Self - -class BaseResponseFunctionAnalysisThreatScoreData(BaseModel): - """ - BaseResponseFunctionAnalysisThreatScoreData - """ # noqa: E501 - status: Optional[StrictBool] = Field(default=True, description="Response status on whether the request succeeded") - data: Optional[FunctionAnalysisThreatScoreData] = None - message: Optional[StrictStr] = None - errors: Optional[List[ErrorModel]] = None - meta: Optional[MetaModel] = Field(default=None, description="Metadata") - __properties: ClassVar[List[str]] = ["status", "data", "message", "errors", "meta"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of BaseResponseFunctionAnalysisThreatScoreData from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([ - ]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of data - if self.data: - _dict['data'] = self.data.to_dict() - # override the default output from pydantic by calling `to_dict()` of each item in errors (list) - _items = [] - if self.errors: - for _item_errors in self.errors: - if _item_errors: - _items.append(_item_errors.to_dict()) - _dict['errors'] = _items - # override the default output from pydantic by calling `to_dict()` of meta - if self.meta: - _dict['meta'] = self.meta.to_dict() - # set to None if data (nullable) is None - # and model_fields_set contains the field - if self.data is None and "data" in self.model_fields_set: - _dict['data'] = None - - # set to None if message (nullable) is None - # and model_fields_set contains the field - if self.message is None and "message" in self.model_fields_set: - _dict['message'] = None - - # set to None if errors (nullable) is None - # and model_fields_set contains the field - if self.errors is None and "errors" in self.model_fields_set: - _dict['errors'] = None - - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of BaseResponseFunctionAnalysisThreatScoreData from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({ - "status": obj.get("status") if obj.get("status") is not None else True, - "data": FunctionAnalysisThreatScoreData.from_dict(obj["data"]) if obj.get("data") is not None else None, - "message": obj.get("message"), - "errors": [ErrorModel.from_dict(_item) for _item in obj["errors"]] if obj.get("errors") is not None else None, - "meta": MetaModel.from_dict(obj["meta"]) if obj.get("meta") is not None else None - }) - return _obj - - diff --git a/revengai/models/base_response_function_threat_score.py b/revengai/models/base_response_function_threat_score.py deleted file mode 100644 index 180fc47..0000000 --- a/revengai/models/base_response_function_threat_score.py +++ /dev/null @@ -1,125 +0,0 @@ -# coding: utf-8 - -""" - RevEng.AI API - - RevEng.AI is Similarity Search Engine for executable binaries - - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr -from typing import Any, ClassVar, Dict, List, Optional -from revengai.models.error_model import ErrorModel -from revengai.models.function_threat_score import FunctionThreatScore -from revengai.models.meta_model import MetaModel -from typing import Optional, Set -from typing_extensions import Self - -class BaseResponseFunctionThreatScore(BaseModel): - """ - BaseResponseFunctionThreatScore - """ # noqa: E501 - status: Optional[StrictBool] = Field(default=True, description="Response status on whether the request succeeded") - data: Optional[FunctionThreatScore] = None - message: Optional[StrictStr] = None - errors: Optional[List[ErrorModel]] = None - meta: Optional[MetaModel] = Field(default=None, description="Metadata") - __properties: ClassVar[List[str]] = ["status", "data", "message", "errors", "meta"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of BaseResponseFunctionThreatScore from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([ - ]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of data - if self.data: - _dict['data'] = self.data.to_dict() - # override the default output from pydantic by calling `to_dict()` of each item in errors (list) - _items = [] - if self.errors: - for _item_errors in self.errors: - if _item_errors: - _items.append(_item_errors.to_dict()) - _dict['errors'] = _items - # override the default output from pydantic by calling `to_dict()` of meta - if self.meta: - _dict['meta'] = self.meta.to_dict() - # set to None if data (nullable) is None - # and model_fields_set contains the field - if self.data is None and "data" in self.model_fields_set: - _dict['data'] = None - - # set to None if message (nullable) is None - # and model_fields_set contains the field - if self.message is None and "message" in self.model_fields_set: - _dict['message'] = None - - # set to None if errors (nullable) is None - # and model_fields_set contains the field - if self.errors is None and "errors" in self.model_fields_set: - _dict['errors'] = None - - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of BaseResponseFunctionThreatScore from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({ - "status": obj.get("status") if obj.get("status") is not None else True, - "data": FunctionThreatScore.from_dict(obj["data"]) if obj.get("data") is not None else None, - "message": obj.get("message"), - "errors": [ErrorModel.from_dict(_item) for _item in obj["errors"]] if obj.get("errors") is not None else None, - "meta": MetaModel.from_dict(obj["meta"]) if obj.get("meta") is not None else None - }) - return _obj - - diff --git a/revengai/models/function_analysis_threat_score_data.py b/revengai/models/function_analysis_threat_score_data.py deleted file mode 100644 index f240cf3..0000000 --- a/revengai/models/function_analysis_threat_score_data.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -""" - RevEng.AI API - - RevEng.AI is Similarity Search Engine for executable binaries - - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt -from typing import Any, ClassVar, Dict, List, Union -from typing import Optional, Set -from typing_extensions import Self - -class FunctionAnalysisThreatScoreData(BaseModel): - """ - FunctionAnalysisThreatScoreData - """ # noqa: E501 - min: Union[StrictFloat, StrictInt] = Field(description="The minimum value for the analysis score") - max: Union[StrictFloat, StrictInt] = Field(description="The maximum value for the analysis score") - average: Union[StrictFloat, StrictInt] = Field(description="The average value for the analysis score") - upper: Union[StrictFloat, StrictInt] = Field(description="The upper limit for the analysis score") - lower: Union[StrictFloat, StrictInt] = Field(description="The lower limit for the analysis score") - malware_count: StrictInt = Field(description="Number of malware binaries used in threat score calculation") - benign_count: StrictInt = Field(description="Number of benign binaries used in threat score calculation") - __properties: ClassVar[List[str]] = ["min", "max", "average", "upper", "lower", "malware_count", "benign_count"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of FunctionAnalysisThreatScoreData from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([ - ]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of FunctionAnalysisThreatScoreData from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({ - "min": obj.get("min"), - "max": obj.get("max"), - "average": obj.get("average"), - "upper": obj.get("upper"), - "lower": obj.get("lower"), - "malware_count": obj.get("malware_count"), - "benign_count": obj.get("benign_count") - }) - return _obj - - diff --git a/revengai/models/function_threat_score.py b/revengai/models/function_threat_score.py deleted file mode 100644 index 1d40008..0000000 --- a/revengai/models/function_threat_score.py +++ /dev/null @@ -1,99 +0,0 @@ -# coding: utf-8 - -""" - RevEng.AI API - - RevEng.AI is Similarity Search Engine for executable binaries - - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -from __future__ import annotations -import pprint -import re # noqa: F401 -import json - -from pydantic import BaseModel, ConfigDict, Field -from typing import Any, ClassVar, Dict, List -from revengai.models.function_analysis_threat_score_data import FunctionAnalysisThreatScoreData -from typing import Optional, Set -from typing_extensions import Self - -class FunctionThreatScore(BaseModel): - """ - FunctionThreatScore - """ # noqa: E501 - results: Dict[str, FunctionAnalysisThreatScoreData] = Field(description="The results of the function threat") - __properties: ClassVar[List[str]] = ["results"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of FunctionThreatScore from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([ - ]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of each value in results (dict) - _field_dict = {} - if self.results: - for _key_results in self.results: - if self.results[_key_results]: - _field_dict[_key_results] = self.results[_key_results].to_dict() - _dict['results'] = _field_dict - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of FunctionThreatScore from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({ - "results": dict( - (_k, FunctionAnalysisThreatScoreData.from_dict(_v)) - for _k, _v in obj["results"].items() - ) - if obj.get("results") is not None - else None - }) - return _obj - - diff --git a/test/test_analyses_results_metadata_api.py b/test/test_analyses_results_metadata_api.py index 123bbd1..9563c4a 100644 --- a/test/test_analyses_results_metadata_api.py +++ b/test/test_analyses_results_metadata_api.py @@ -67,13 +67,6 @@ def test_get_tags(self) -> None: """ pass - def test_get_threat_score(self) -> None: - """Test case for get_threat_score - - Gets the threat score found in the analysis - """ - pass - def test_get_vulnerabilities(self) -> None: """Test case for get_vulnerabilities diff --git a/test/test_analysis_threat_score_data.py b/test/test_analysis_threat_score_data.py deleted file mode 100644 index 0394341..0000000 --- a/test/test_analysis_threat_score_data.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding: utf-8 - -""" - RevEng.AI API - - RevEng.AI is Similarity Search Engine for executable binaries - - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from revengai.models.analysis_threat_score_data import AnalysisThreatScoreData - -class TestAnalysisThreatScoreData(unittest.TestCase): - """AnalysisThreatScoreData unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> AnalysisThreatScoreData: - """Test AnalysisThreatScoreData - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included """ - # uncomment below to create an instance of `AnalysisThreatScoreData` - """ - model = AnalysisThreatScoreData() - if include_optional: - return AnalysisThreatScoreData( - min = 1.337, - max = 1.337, - average = 1.337, - upper = 1.337, - lower = 1.337, - malware_count = 56, - benign_count = 56 - ) - else: - return AnalysisThreatScoreData( - min = 1.337, - max = 1.337, - average = 1.337, - upper = 1.337, - lower = 1.337, - malware_count = 56, - benign_count = 56, - ) - """ - - def testAnalysisThreatScoreData(self): - """Test AnalysisThreatScoreData""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - -if __name__ == '__main__': - unittest.main() diff --git a/test/test_base_response_analysis_threat_score_data.py b/test/test_base_response_analysis_threat_score_data.py deleted file mode 100644 index 2f4b21f..0000000 --- a/test/test_base_response_analysis_threat_score_data.py +++ /dev/null @@ -1,69 +0,0 @@ -# coding: utf-8 - -""" - RevEng.AI API - - RevEng.AI is Similarity Search Engine for executable binaries - - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from revengai.models.base_response_analysis_threat_score_data import BaseResponseAnalysisThreatScoreData - -class TestBaseResponseAnalysisThreatScoreData(unittest.TestCase): - """BaseResponseAnalysisThreatScoreData unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> BaseResponseAnalysisThreatScoreData: - """Test BaseResponseAnalysisThreatScoreData - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included """ - # uncomment below to create an instance of `BaseResponseAnalysisThreatScoreData` - """ - model = BaseResponseAnalysisThreatScoreData() - if include_optional: - return BaseResponseAnalysisThreatScoreData( - status = True, - data = revengai.models.analysis_threat_score_data.AnalysisThreatScoreData( - min = 1.337, - max = 1.337, - average = 1.337, - upper = 1.337, - lower = 1.337, - malware_count = 56, - benign_count = 56, ), - message = '', - errors = [ - revengai.models.error_model.ErrorModel( - code = '', - message = '', ) - ], - meta = revengai.models.meta_model.MetaModel( - pagination = revengai.models.pagination_model.PaginationModel( - page_size = 56, - page_number = 56, - has_next_page = True, ), ) - ) - else: - return BaseResponseAnalysisThreatScoreData( - ) - """ - - def testBaseResponseAnalysisThreatScoreData(self): - """Test BaseResponseAnalysisThreatScoreData""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - -if __name__ == '__main__': - unittest.main() diff --git a/test/test_base_response_function_analysis_threat_score_data.py 
b/test/test_base_response_function_analysis_threat_score_data.py deleted file mode 100644 index a4568a5..0000000 --- a/test/test_base_response_function_analysis_threat_score_data.py +++ /dev/null @@ -1,69 +0,0 @@ -# coding: utf-8 - -""" - RevEng.AI API - - RevEng.AI is Similarity Search Engine for executable binaries - - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from revengai.models.base_response_function_analysis_threat_score_data import BaseResponseFunctionAnalysisThreatScoreData - -class TestBaseResponseFunctionAnalysisThreatScoreData(unittest.TestCase): - """BaseResponseFunctionAnalysisThreatScoreData unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> BaseResponseFunctionAnalysisThreatScoreData: - """Test BaseResponseFunctionAnalysisThreatScoreData - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included """ - # uncomment below to create an instance of `BaseResponseFunctionAnalysisThreatScoreData` - """ - model = BaseResponseFunctionAnalysisThreatScoreData() - if include_optional: - return BaseResponseFunctionAnalysisThreatScoreData( - status = True, - data = revengai.models.function_analysis_threat_score_data.FunctionAnalysisThreatScoreData( - min = 1.337, - max = 1.337, - average = 1.337, - upper = 1.337, - lower = 1.337, - malware_count = 56, - benign_count = 56, ), - message = '', - errors = [ - revengai.models.error_model.ErrorModel( - code = '', - message = '', ) - ], - meta = revengai.models.meta_model.MetaModel( - pagination = revengai.models.pagination_model.PaginationModel( - page_size = 56, - page_number = 56, - has_next_page = True, ), ) - ) - else: - return BaseResponseFunctionAnalysisThreatScoreData( - ) - """ - - def testBaseResponseFunctionAnalysisThreatScoreData(self): - """Test BaseResponseFunctionAnalysisThreatScoreData""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - -if __name__ == '__main__': - unittest.main() diff --git a/test/test_base_response_function_threat_score.py b/test/test_base_response_function_threat_score.py deleted file mode 100644 index cef1d46..0000000 --- a/test/test_base_response_function_threat_score.py +++ /dev/null @@ -1,72 +0,0 @@ -# coding: utf-8 - -""" - RevEng.AI API - - RevEng.AI is Similarity Search Engine for executable binaries - - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from revengai.models.base_response_function_threat_score import BaseResponseFunctionThreatScore - -class TestBaseResponseFunctionThreatScore(unittest.TestCase): - """BaseResponseFunctionThreatScore unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> BaseResponseFunctionThreatScore: - """Test BaseResponseFunctionThreatScore - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included """ - # uncomment below to create an instance of `BaseResponseFunctionThreatScore` - """ - model = BaseResponseFunctionThreatScore() - if include_optional: - return BaseResponseFunctionThreatScore( - status = True, - data = revengai.models.function_threat_score.FunctionThreatScore( - results = { - 'key' : revengai.models.function_analysis_threat_score_data.FunctionAnalysisThreatScoreData( - min = 1.337, - max = 1.337, - average = 1.337, - upper = 1.337, - lower = 1.337, - malware_count = 56, - benign_count = 56, ) - }, ), - message = '', - errors = [ - revengai.models.error_model.ErrorModel( - code = '', - message = '', ) - ], - meta = revengai.models.meta_model.MetaModel( - pagination = revengai.models.pagination_model.PaginationModel( - page_size = 56, - page_number = 56, - has_next_page = True, ), ) - ) - else: - return BaseResponseFunctionThreatScore( - ) - """ - - def testBaseResponseFunctionThreatScore(self): - """Test BaseResponseFunctionThreatScore""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - -if __name__ == '__main__': - unittest.main() diff --git a/test/test_function_analysis_threat_score_data.py b/test/test_function_analysis_threat_score_data.py deleted file mode 100644 index 981e076..0000000 --- a/test/test_function_analysis_threat_score_data.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding: utf-8 - -""" - RevEng.AI API - - RevEng.AI is Similarity Search Engine for executable binaries - - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from revengai.models.function_analysis_threat_score_data import FunctionAnalysisThreatScoreData - -class TestFunctionAnalysisThreatScoreData(unittest.TestCase): - """FunctionAnalysisThreatScoreData unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> FunctionAnalysisThreatScoreData: - """Test FunctionAnalysisThreatScoreData - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included """ - # uncomment below to create an instance of `FunctionAnalysisThreatScoreData` - """ - model = FunctionAnalysisThreatScoreData() - if include_optional: - return FunctionAnalysisThreatScoreData( - min = 1.337, - max = 1.337, - average = 1.337, - upper = 1.337, - lower = 1.337, - malware_count = 56, - benign_count = 56 - ) - else: - return FunctionAnalysisThreatScoreData( - min = 1.337, - max = 1.337, - average = 1.337, - upper = 1.337, - lower = 1.337, - malware_count = 56, - benign_count = 56, - ) - """ - - def testFunctionAnalysisThreatScoreData(self): - """Test FunctionAnalysisThreatScoreData""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - -if __name__ == '__main__': - unittest.main() diff --git a/test/test_function_threat_score.py b/test/test_function_threat_score.py deleted file mode 100644 index 1097435..0000000 --- a/test/test_function_threat_score.py +++ /dev/null @@ -1,69 +0,0 @@ -# coding: utf-8 - -""" - RevEng.AI API - - RevEng.AI is Similarity Search Engine for executable binaries - - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. 
-""" # noqa: E501 - - -import unittest - -from revengai.models.function_threat_score import FunctionThreatScore - -class TestFunctionThreatScore(unittest.TestCase): - """FunctionThreatScore unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def make_instance(self, include_optional) -> FunctionThreatScore: - """Test FunctionThreatScore - include_optional is a boolean, when False only required - params are included, when True both required and - optional params are included """ - # uncomment below to create an instance of `FunctionThreatScore` - """ - model = FunctionThreatScore() - if include_optional: - return FunctionThreatScore( - results = { - 'key' : revengai.models.function_analysis_threat_score_data.FunctionAnalysisThreatScoreData( - min = 1.337, - max = 1.337, - average = 1.337, - upper = 1.337, - lower = 1.337, - malware_count = 56, - benign_count = 56, ) - } - ) - else: - return FunctionThreatScore( - results = { - 'key' : revengai.models.function_analysis_threat_score_data.FunctionAnalysisThreatScoreData( - min = 1.337, - max = 1.337, - average = 1.337, - upper = 1.337, - lower = 1.337, - malware_count = 56, - benign_count = 56, ) - }, - ) - """ - - def testFunctionThreatScore(self): - """Test FunctionThreatScore""" - # inst_req_only = self.make_instance(include_optional=False) - # inst_req_and_optional = self.make_instance(include_optional=True) - -if __name__ == '__main__': - unittest.main() diff --git a/test/test_functions_threat_score_api.py b/test/test_functions_threat_score_api.py deleted file mode 100644 index 1011685..0000000 --- a/test/test_functions_threat_score_api.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding: utf-8 - -""" - RevEng.AI API - - RevEng.AI is Similarity Search Engine for executable binaries - - Generated by OpenAPI Generator (https://openapi-generator.tech) - - Do not edit the class manually. -""" # noqa: E501 - - -import unittest - -from revengai.api.functions_threat_score_api import FunctionsThreatScoreApi - - -class TestFunctionsThreatScoreApi(unittest.TestCase): - """FunctionsThreatScoreApi unit test stubs""" - - def setUp(self) -> None: - self.api = FunctionsThreatScoreApi() - - def tearDown(self) -> None: - pass - - def test_get_all_function_threat_scores(self) -> None: - """Test case for get_all_function_threat_scores - - Gets the threat score for all functions - """ - pass - - def test_get_individual_function_threat_score(self) -> None: - """Test case for get_individual_function_threat_score - - Gets the threat score analysis - """ - pass - - -if __name__ == '__main__': - unittest.main()