From 0ecf79a1ba881ad8434ea075d8f1f853b7d735e7 Mon Sep 17 00:00:00 2001 From: Youjung Kim <126618609+ykim-akamai@users.noreply.github.com> Date: Tue, 15 Apr 2025 07:26:38 -0700 Subject: [PATCH 01/34] Update test region capability for LDE (#530) * update submodule * update capability in test region and miscellaneous test fixes * remove error fixture and add retry * lint * pr comments * pr comments --- test/integration/login_client/test_login_client.py | 1 + test/integration/models/domain/test_domain.py | 2 -- test/integration/models/linode/test_linode.py | 12 ++++++------ test/integration/models/lke/test_lke.py | 12 ++++++++---- 4 files changed, 15 insertions(+), 12 deletions(-) diff --git a/test/integration/login_client/test_login_client.py b/test/integration/login_client/test_login_client.py index ccbeb1976..24519346c 100644 --- a/test/integration/login_client/test_login_client.py +++ b/test/integration/login_client/test_login_client.py @@ -97,6 +97,7 @@ def test_linode_login_client_generate_login_url_with_scope(linode_login_client): assert "scopes=linodes%3Aread_write" in url +@pytest.mark.skip("Endpoint may be deprecated") def test_linode_login_client_expire_token( linode_login_client, test_oauth_client ): diff --git a/test/integration/models/domain/test_domain.py b/test/integration/models/domain/test_domain.py index 36ecbb0dc..9dc180a6e 100644 --- a/test/integration/models/domain/test_domain.py +++ b/test/integration/models/domain/test_domain.py @@ -23,8 +23,6 @@ def test_save_null_values_excluded(test_linode_client, test_domain): domain.master_ips = ["127.0.0.1"] res = domain.save() - assert res - def test_zone_file_view(test_linode_client, test_domain): domain = test_linode_client.load(Domain, test_domain.id) diff --git a/test/integration/models/linode/test_linode.py b/test/integration/models/linode/test_linode.py index d97a8294a..835330810 100644 --- a/test/integration/models/linode/test_linode.py +++ b/test/integration/models/linode/test_linode.py @@ -9,7 +9,6 @@ import pytest -from linode_api4 import VPCIPAddress from linode_api4.errors import ApiError from linode_api4.objects import ( Config, @@ -181,7 +180,7 @@ def create_linode_for_long_running_tests(test_linode_client, e2e_test_firewall): def linode_with_disk_encryption(test_linode_client, request): client = test_linode_client - target_region = get_region(client, {"Disk Encryption"}) + target_region = get_region(client, {"LA Disk Encryption"}) label = get_test_label(length=8) disk_encryption = request.param @@ -236,7 +235,7 @@ def test_linode_transfer(test_linode_client, linode_with_volume_firewall): def test_linode_rebuild(test_linode_client): client = test_linode_client - region = get_region(client, {"Disk Encryption"}) + region = get_region(client, {"LA Disk Encryption"}) label = get_test_label() + "_rebuild" @@ -535,6 +534,7 @@ def test_linode_create_disk(test_linode_client, linode_for_disk_tests): assert disk.linode_id == linode.id +@pytest.mark.flaky(reruns=3, reruns_delay=2) def test_linode_instance_password(create_linode_for_pass_reset): linode = create_linode_for_pass_reset[0] password = create_linode_for_pass_reset[1] @@ -775,10 +775,10 @@ def test_create_vpc( assert vpc_range_ip.address_range == "10.0.0.5/32" assert not vpc_range_ip.active + # TODO:: Add `VPCIPAddress.filters.linode_id == linode.id` filter back + # Attempt to resolve the IP from /vpcs/ips - all_vpc_ips = test_linode_client.vpcs.ips( - VPCIPAddress.filters.linode_id == linode.id - ) + all_vpc_ips = test_linode_client.vpcs.ips() assert 
all_vpc_ips[0].dict == vpc_ip.dict # Test getting the ips under this specific VPC diff --git a/test/integration/models/lke/test_lke.py b/test/integration/models/lke/test_lke.py index e4c941c16..e0a9eafb1 100644 --- a/test/integration/models/lke/test_lke.py +++ b/test/integration/models/lke/test_lke.py @@ -32,7 +32,9 @@ def lke_cluster(test_linode_client): node_type = test_linode_client.linode.types()[1] # g6-standard-1 version = test_linode_client.lke.versions()[0] - region = get_region(test_linode_client, {"Kubernetes", "Disk Encryption"}) + region = get_region( + test_linode_client, {"Kubernetes", "LA Disk Encryption"} + ) node_pools = test_linode_client.lke.node_pool(node_type, 3) label = get_test_label() + "_cluster" @@ -115,7 +117,9 @@ def lke_cluster_with_labels_and_taints(test_linode_client): def lke_cluster_with_apl(test_linode_client): version = test_linode_client.lke.versions()[0] - region = get_region(test_linode_client, {"Kubernetes", "Disk Encryption"}) + region = get_region( + test_linode_client, {"Kubernetes", "LA Disk Encryption"} + ) # NOTE: g6-dedicated-4 is the minimum APL-compatible Linode type node_pools = test_linode_client.lke.node_pool("g6-dedicated-4", 3) @@ -145,7 +149,7 @@ def lke_cluster_enterprise(test_linode_client): )[0] region = get_region( - test_linode_client, {"Kubernetes Enterprise", "Disk Encryption"} + test_linode_client, {"Kubernetes Enterprise", "LA Disk Encryption"} ) node_pools = test_linode_client.lke.node_pool( @@ -204,7 +208,7 @@ def _to_comparable(p: LKENodePool) -> Dict[str, Any]: assert _to_comparable(cluster.pools[0]) == _to_comparable(pool) - assert pool.disk_encryption == InstanceDiskEncryptionType.enabled + assert pool.disk_encryption == InstanceDiskEncryptionType.disabled def test_cluster_dashboard_url_view(lke_cluster): From b2eff93cbe7b023c7fc35fdd6d0c73bc6866189c Mon Sep 17 00:00:00 2001 From: Lena Garber <114949949+lgarber-akamai@users.noreply.github.com> Date: Mon, 28 Apr 2025 11:33:58 -0400 Subject: [PATCH 02/34] Implement JSONObject put_class ClassVar (#534) --- linode_api4/objects/account.py | 4 +- linode_api4/objects/base.py | 26 +++++++----- linode_api4/objects/linode.py | 12 +++--- linode_api4/objects/serializable.py | 24 +++++++++-- test/unit/objects/serializable_test.py | 55 +++++++++++++++++++++++++- 5 files changed, 99 insertions(+), 22 deletions(-) diff --git a/linode_api4/objects/account.py b/linode_api4/objects/account.py index 375e5fc03..c7318d871 100644 --- a/linode_api4/objects/account.py +++ b/linode_api4/objects/account.py @@ -601,7 +601,7 @@ def entity(self): ) return self.cls(self._client, self.id) - def _serialize(self): + def _serialize(self, *args, **kwargs): """ Returns this grant in as JSON the api will accept. This is only relevant in the context of UserGrants.save @@ -668,7 +668,7 @@ def _grants_dict(self): return grants - def _serialize(self): + def _serialize(self, *args, **kwargs): """ Returns the user grants in as JSON the api will accept. 
This is only relevant in the context of UserGrants.save diff --git a/linode_api4/objects/base.py b/linode_api4/objects/base.py index 6c9b1bece..c9a622edc 100644 --- a/linode_api4/objects/base.py +++ b/linode_api4/objects/base.py @@ -114,6 +114,9 @@ def _flatten_base_subclass(obj: "Base") -> Optional[Dict[str, Any]]: @property def dict(self): + return self._serialize() + + def _serialize(self, is_put: bool = False) -> Dict[str, Any]: result = vars(self).copy() cls = type(self) @@ -123,7 +126,7 @@ def dict(self): elif isinstance(v, list): result[k] = [ ( - item.dict + item._serialize(is_put=is_put) if isinstance(item, (cls, JSONObject)) else ( self._flatten_base_subclass(item) @@ -136,7 +139,7 @@ def dict(self): elif isinstance(v, Base): result[k] = self._flatten_base_subclass(v) elif isinstance(v, JSONObject): - result[k] = v.dict + result[k] = v._serialize(is_put=is_put) return result @@ -278,9 +281,9 @@ def save(self, force=True) -> bool: data[key] = None # Ensure we serialize any values that may not be already serialized - data = _flatten_request_body_recursive(data) + data = _flatten_request_body_recursive(data, is_put=True) else: - data = self._serialize() + data = self._serialize(is_put=True) resp = self._client.put(type(self).api_endpoint, model=self, data=data) @@ -316,7 +319,7 @@ def invalidate(self): self._set("_populated", False) - def _serialize(self): + def _serialize(self, is_put: bool = False): """ A helper method to build a dict of all mutable Properties of this object @@ -345,7 +348,7 @@ def _serialize(self): # Resolve the underlying IDs of results for k, v in result.items(): - result[k] = _flatten_request_body_recursive(v) + result[k] = _flatten_request_body_recursive(v, is_put=is_put) return result @@ -503,7 +506,7 @@ def make_instance(cls, id, client, parent_id=None, json=None): return Base.make(id, client, cls, parent_id=parent_id, json=json) -def _flatten_request_body_recursive(data: Any) -> Any: +def _flatten_request_body_recursive(data: Any, is_put: bool = False) -> Any: """ This is a helper recursively flatten the given data for use in an API request body. 
@@ -515,15 +518,21 @@
     if isinstance(data, dict):
-        return {k: _flatten_request_body_recursive(v) for k, v in data.items()}
+        return {
+            k: _flatten_request_body_recursive(v, is_put=is_put)
+            for k, v in data.items()
+        }

     if isinstance(data, list):
-        return [_flatten_request_body_recursive(v) for v in data]
+        return [_flatten_request_body_recursive(v, is_put=is_put) for v in data]

     if isinstance(data, Base):
         return data.id

     if isinstance(data, MappedObject) or issubclass(type(data), JSONObject):
-        return data.dict
+        return data._serialize(is_put=is_put)

     return data
diff --git a/linode_api4/objects/linode.py b/linode_api4/objects/linode.py
index 46af5d970..c70dd7965 100644
--- a/linode_api4/objects/linode.py
+++ b/linode_api4/objects/linode.py
@@ -400,7 +400,7 @@ class ConfigInterface(JSONObject):
     def __repr__(self):
         return f"Interface: {self.purpose}"

-    def _serialize(self):
+    def _serialize(self, *args, **kwargs):
         purpose_formats = {
             "public": {"purpose": "public", "primary": self.primary},
             "vlan": {
@@ -510,16 +510,16 @@ def _populate(self, json):

         self._set("devices", MappedObject(**devices))

-    def _serialize(self):
+    def _serialize(self, is_put: bool = False):
         """
         Overrides _serialize to transform interfaces into json
         """
-        partial = DerivedBase._serialize(self)
+        partial = DerivedBase._serialize(self, is_put=is_put)

         interfaces = []
         for c in self.interfaces:
             if isinstance(c, ConfigInterface):
-                interfaces.append(c._serialize())
+                interfaces.append(c._serialize(is_put=is_put))
             else:
                 interfaces.append(c)
@@ -1927,8 +1927,8 @@ def _populate(self, json):
             ndist = [Image(self._client, d) for d in self.images]
             self._set("images", ndist)

-    def _serialize(self):
-        dct = Base._serialize(self)
+    def _serialize(self, is_put: bool = False):
+        dct = Base._serialize(self, is_put=is_put)
         dct["images"] = [d.id for d in self.images]
         return dct

diff --git a/linode_api4/objects/serializable.py b/linode_api4/objects/serializable.py
index fea682f43..e33179a60 100644
--- a/linode_api4/objects/serializable.py
+++ b/linode_api4/objects/serializable.py
@@ -1,5 +1,5 @@
 import inspect
-from dataclasses import dataclass
+from dataclasses import dataclass, fields
 from enum import Enum
 from types import SimpleNamespace
 from typing import (
@@ -9,6 +9,7 @@
     List,
     Optional,
     Set,
+    Type,
     Union,
     get_args,
     get_origin,
@@ -71,6 +72,13 @@ class JSONObject(metaclass=JSONFilterableMetaclass):
     are None.
     """

+    put_class: ClassVar[Optional[Type["JSONObject"]]] = None
+    """
+    An alternative JSONObject class to use as the schema for PUT requests.
+    This prevents read-only fields from being included in PUT request bodies,
+    which could otherwise result in validation errors from the API.
+    """
+
     def __init__(self):
         raise NotImplementedError(
             "JSONObject is not intended to be constructed directly"
         )
@@ -154,11 +162,17 @@ def from_json(cls, json: Dict[str, Any]) -> Optional["JSONObject"]:

         return obj

-    def _serialize(self) -> Dict[str, Any]:
+    def _serialize(self, is_put: bool = False) -> Dict[str, Any]:
         """
         Serializes this object into a JSON dict.
         """
         cls = type(self)
+
+        if is_put and cls.put_class is not None:
+            cls = cls.put_class
+
+        cls_field_keys = {field.name for field in fields(cls)}
+
         type_hints = get_type_hints(cls)

         def attempt_serialize(value: Any) -> Any:
             """
             Attempts to serialize the given value, else returns the value unchanged.
""" if issubclass(type(value), JSONObject): - return value._serialize() + return value._serialize(is_put=is_put) return value @@ -175,6 +189,10 @@ def should_include(key: str, value: Any) -> bool: Returns whether the given key/value pair should be included in the resulting dict. """ + # During PUT operations, keys not present in the put_class should be excluded + if key not in cls_field_keys: + return False + if cls.include_none_values or key in cls.always_include: return True diff --git a/test/unit/objects/serializable_test.py b/test/unit/objects/serializable_test.py index a15f108b4..9a775ccf1 100644 --- a/test/unit/objects/serializable_test.py +++ b/test/unit/objects/serializable_test.py @@ -2,7 +2,7 @@ from test.unit.base import ClientBaseCase from typing import Optional -from linode_api4 import JSONObject +from linode_api4 import Base, JSONObject, Property class JSONObjectTest(ClientBaseCase): @@ -47,3 +47,56 @@ class Foo(JSONObject): assert foo["foo"] == "test" assert foo["bar"] == "test2" assert foo["baz"] == "test3" + + def test_serialize_put_class(self): + """ + Ensures that the JSONObject put_class ClassVar functions as expected. + """ + + @dataclass + class SubStructOptions(JSONObject): + test1: Optional[str] = None + + @dataclass + class SubStruct(JSONObject): + put_class = SubStructOptions + + test1: str = "" + test2: int = 0 + + class Model(Base): + api_endpoint = "/foo/bar" + + properties = { + "id": Property(identifier=True), + "substruct": Property(mutable=True, json_object=SubStruct), + } + + mock_response = { + "id": 123, + "substruct": { + "test1": "abc", + "test2": 321, + }, + } + + with self.mock_get(mock_response) as mock: + obj = self.client.load(Model, 123) + + assert mock.called + + assert obj.id == 123 + assert obj.substruct.test1 == "abc" + assert obj.substruct.test2 == 321 + + obj.substruct.test1 = "cba" + + with self.mock_put(mock_response) as mock: + obj.save() + + assert mock.called + assert mock.call_data == { + "substruct": { + "test1": "cba", + } + } From c72280ed7106e66e84a74c0b1510f4e58a77a54b Mon Sep 17 00:00:00 2001 From: Youjung Kim <126618609+ykim-akamai@users.noreply.github.com> Date: Mon, 28 Apr 2025 10:37:07 -0700 Subject: [PATCH 03/34] add retry to flaky test, safety around bucket delete (#538) --- test/integration/models/linode/test_linode.py | 1 + test/integration/models/object_storage/test_obj.py | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/test/integration/models/linode/test_linode.py b/test/integration/models/linode/test_linode.py index 835330810..ade4ca5ed 100644 --- a/test/integration/models/linode/test_linode.py +++ b/test/integration/models/linode/test_linode.py @@ -364,6 +364,7 @@ def test_linode_resize(create_linode_for_long_running_tests): assert linode.status == "running" +@pytest.mark.flaky(reruns=3, reruns_delay=2) def test_linode_resize_with_class( test_linode_client, create_linode_for_long_running_tests ): diff --git a/test/integration/models/object_storage/test_obj.py b/test/integration/models/object_storage/test_obj.py index 33ce8dfbe..e52f85e0f 100644 --- a/test/integration/models/object_storage/test_obj.py +++ b/test/integration/models/object_storage/test_obj.py @@ -1,5 +1,6 @@ import time from test.integration.conftest import get_region +from test.integration.helpers import send_request_when_resource_available import pytest @@ -38,7 +39,7 @@ def bucket( ) yield bucket - bucket.delete() + send_request_when_resource_available(timeout=100, func=bucket.delete) @pytest.fixture(scope="session") @@ -63,7 
+64,8 @@ def bucket_with_endpoint( ) yield bucket - bucket.delete() + + send_request_when_resource_available(timeout=100, func=bucket.delete) @pytest.fixture(scope="session") From c449113cd05fc8885c8470df6541395547050915 Mon Sep 17 00:00:00 2001 From: Youjung Kim <126618609+ykim-akamai@users.noreply.github.com> Date: Tue, 29 Apr 2025 10:51:59 -0700 Subject: [PATCH 04/34] update obj test region (#539) --- test/integration/models/object_storage/test_obj.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/integration/models/object_storage/test_obj.py b/test/integration/models/object_storage/test_obj.py index e52f85e0f..047dfbdb4 100644 --- a/test/integration/models/object_storage/test_obj.py +++ b/test/integration/models/object_storage/test_obj.py @@ -1,5 +1,4 @@ import time -from test.integration.conftest import get_region from test.integration.helpers import send_request_when_resource_available import pytest @@ -19,7 +18,7 @@ @pytest.fixture(scope="session") def region(test_linode_client: LinodeClient): - return get_region(test_linode_client, {"Object Storage"}).id + return "us-southeast" # uncomment get_region(test_linode_client, {"Object Storage"}).id @pytest.fixture(scope="session") From 8cc03eea14a20ae02fe8c6d8b21ddbdccbd536a2 Mon Sep 17 00:00:00 2001 From: Zhiwei Liang <121905282+zliang-akamai@users.noreply.github.com> Date: Wed, 7 May 2025 03:19:38 -0400 Subject: [PATCH 05/34] Trusted publisher for PyPI (#536) --- .github/workflows/publish-pypi.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/publish-pypi.yaml b/.github/workflows/publish-pypi.yaml index d5338b7a7..027ac5298 100644 --- a/.github/workflows/publish-pypi.yaml +++ b/.github/workflows/publish-pypi.yaml @@ -5,7 +5,11 @@ on: types: [ published ] jobs: pypi-release: + permissions: + # IMPORTANT: this permission is mandatory for trusted publishing + id-token: write runs-on: ubuntu-latest + environment: pypi-release steps: - name: Checkout uses: actions/checkout@v4 @@ -25,5 +29,3 @@ jobs: - name: Publish the release artifacts to PyPI uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # pin@release/v1.12.4 - with: - password: ${{ secrets.PYPI_API_TOKEN }} From 751180e2a70b363cc94a5c2382b5af518231ab8e Mon Sep 17 00:00:00 2001 From: Ye Chen <127243817+yec-akamai@users.noreply.github.com> Date: Mon, 12 May 2025 12:20:55 -0400 Subject: [PATCH 06/34] Project: Limits Visibility M1 (#544) * Support Object Storage Quota Limits Visibility (#531) * obj quota * add comment * build json object * add obj quotas int tests (#535) * Update Object Storage quota doc link (#543) --------- Co-authored-by: Youjung Kim <126618609+ykim-akamai@users.noreply.github.com> --- linode_api4/groups/object_storage.py | 16 +++++ linode_api4/objects/object_storage.py | 49 ++++++++++++++++ test/fixtures/object-storage_quotas.json | 25 ++++++++ ...t-storage_quotas_obj-objects-us-ord-1.json | 9 +++ ...age_quotas_obj-objects-us-ord-1_usage.json | 4 ++ .../models/object_storage/test_obj_quotas.py | 58 +++++++++++++++++++ test/unit/objects/object_storage_test.py | 51 ++++++++++++++++ 7 files changed, 212 insertions(+) create mode 100644 test/fixtures/object-storage_quotas.json create mode 100644 test/fixtures/object-storage_quotas_obj-objects-us-ord-1.json create mode 100644 test/fixtures/object-storage_quotas_obj-objects-us-ord-1_usage.json create mode 100644 test/integration/models/object_storage/test_obj_quotas.py diff --git a/linode_api4/groups/object_storage.py 
b/linode_api4/groups/object_storage.py
index eb6a296b7..5ffab3ffc 100644
--- a/linode_api4/groups/object_storage.py
+++ b/linode_api4/groups/object_storage.py
@@ -21,6 +21,7 @@
     ObjectStorageCluster,
     ObjectStorageKeyPermission,
     ObjectStorageKeys,
+    ObjectStorageQuota,
 )
 from linode_api4.util import drop_null_keys

@@ -517,3 +518,18 @@ def object_url_create(
         )

         return MappedObject(**result)
+
+    def quotas(self, *filters):
+        """
+        Lists the active Object Storage-related quotas applied to your account.
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-quotas
+
+        :param filters: Any number of filters to apply to this query.
+                        See :doc:`Filtering Collections`
+                        for more details on filtering.
+
+        :returns: A list of Object Storage Quotas that matched the query.
+        :rtype: PaginatedList of ObjectStorageQuota
+        """
+        return self.client._get_and_filter(ObjectStorageQuota, *filters)
diff --git a/linode_api4/objects/object_storage.py b/linode_api4/objects/object_storage.py
index be1fd0cc7..29eba2b06 100644
--- a/linode_api4/objects/object_storage.py
+++ b/linode_api4/objects/object_storage.py
@@ -51,6 +51,16 @@ class ObjectStorageEndpoint(JSONObject):
     s3_endpoint: Optional[str] = None


+@dataclass
+class ObjectStorageQuotaUsage(JSONObject):
+    """
+    ObjectStorageQuotaUsage contains the fields of an Object Storage quota usage response.
+    """
+
+    quota_limit: int = 0
+    usage: int = 0
+
+
 class ObjectStorageType(Base):
     """
     An ObjectStorageType represents the structure of a valid Object Storage type.
@@ -566,3 +576,42 @@ class ObjectStorageKeys(Base):
         "limited": Property(),
         "regions": Property(unordered=True),
     }
+
+
+class ObjectStorageQuota(Base):
+    """
+    Object Storage-related quota information on your account.
+    Object Storage Quota related features are under v4beta and may not currently be available to all users.
+
+    API documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-quota
+    """
+
+    api_endpoint = "/object-storage/quotas/{quota_id}"
+    id_attribute = "quota_id"
+
+    properties = {
+        "quota_id": Property(identifier=True),
+        "quota_name": Property(),
+        "endpoint_type": Property(),
+        "s3_endpoint": Property(),
+        "description": Property(),
+        "quota_limit": Property(),
+        "resource_metric": Property(),
+    }
+
+    def usage(self):
+        """
+        Gets usage data for a specific Object Storage quota on your account, including the quota limit and the current usage for that resource.
+
+        API documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-quota-usage
+
+        :returns: The Object Storage Quota usage.
+ :rtype: ObjectStorageQuotaUsage + """ + + result = self._client.get( + f"{type(self).api_endpoint}/usage", + model=self, + ) + + return ObjectStorageQuotaUsage.from_json(result) diff --git a/test/fixtures/object-storage_quotas.json b/test/fixtures/object-storage_quotas.json new file mode 100644 index 000000000..e831d7303 --- /dev/null +++ b/test/fixtures/object-storage_quotas.json @@ -0,0 +1,25 @@ +{ + "data": [ + { + "quota_id": "obj-objects-us-ord-1", + "quota_name": "Object Storage Maximum Objects", + "description": "Maximum number of Objects this customer is allowed to have on this endpoint.", + "endpoint_type": "E1", + "s3_endpoint": "us-iad-1.linodeobjects.com", + "quota_limit": 50, + "resource_metric": "object" + }, + { + "quota_id": "obj-bucket-us-ord-1", + "quota_name": "Object Storage Maximum Buckets", + "description": "Maximum number of buckets this customer is allowed to have on this endpoint.", + "endpoint_type": "E1", + "s3_endpoint": "us-iad-1.linodeobjects.com", + "quota_limit": 50, + "resource_metric": "bucket" + } + ], + "page": 1, + "pages": 1, + "results": 2 +} \ No newline at end of file diff --git a/test/fixtures/object-storage_quotas_obj-objects-us-ord-1.json b/test/fixtures/object-storage_quotas_obj-objects-us-ord-1.json new file mode 100644 index 000000000..e01d743c3 --- /dev/null +++ b/test/fixtures/object-storage_quotas_obj-objects-us-ord-1.json @@ -0,0 +1,9 @@ +{ + "quota_id": "obj-objects-us-ord-1", + "quota_name": "Object Storage Maximum Objects", + "description": "Maximum number of Objects this customer is allowed to have on this endpoint.", + "endpoint_type": "E1", + "s3_endpoint": "us-iad-1.linodeobjects.com", + "quota_limit": 50, + "resource_metric": "object" +} \ No newline at end of file diff --git a/test/fixtures/object-storage_quotas_obj-objects-us-ord-1_usage.json b/test/fixtures/object-storage_quotas_obj-objects-us-ord-1_usage.json new file mode 100644 index 000000000..59b306044 --- /dev/null +++ b/test/fixtures/object-storage_quotas_obj-objects-us-ord-1_usage.json @@ -0,0 +1,4 @@ +{ + "quota_limit": 100, + "usage": 10 +} diff --git a/test/integration/models/object_storage/test_obj_quotas.py b/test/integration/models/object_storage/test_obj_quotas.py new file mode 100644 index 000000000..b1beade44 --- /dev/null +++ b/test/integration/models/object_storage/test_obj_quotas.py @@ -0,0 +1,58 @@ +from linode_api4.objects.object_storage import ( + ObjectStorageQuota, + ObjectStorageQuotaUsage, +) + + +def test_list_obj_storage_quotas(test_linode_client): + quotas = test_linode_client.object_storage.quotas() + + target_quota_id = "obj-buckets-us-sea-1.linodeobjects.com" + + found_quota = None + for quota in quotas: + if quota.quota_id == target_quota_id: + found_quota = quota + break + + assert ( + found_quota is not None + ), f"Quota with ID {target_quota_id} not found." 
+ + assert found_quota.quota_id == "obj-buckets-us-sea-1.linodeobjects.com" + assert found_quota.quota_name == "max_buckets" + assert found_quota.endpoint_type == "E1" + assert found_quota.s3_endpoint == "us-sea-1.linodeobjects.com" + assert ( + found_quota.description + == "Maximum number of buckets this customer is allowed to have on this endpoint" + ) + assert found_quota.quota_limit == 1000 + assert found_quota.resource_metric == "bucket" + + +def test_get_obj_storage_quota(test_linode_client): + quota_id = "obj-objects-us-ord-1.linodeobjects.com" + quota = test_linode_client.load(ObjectStorageQuota, quota_id) + + assert quota.quota_id == "obj-objects-us-ord-1.linodeobjects.com" + assert quota.quota_name == "max_objects" + assert quota.endpoint_type == "E1" + assert quota.s3_endpoint == "us-ord-1.linodeobjects.com" + assert ( + quota.description + == "Maximum number of objects this customer is allowed to have on this endpoint" + ) + assert quota.quota_limit == 100000000 + assert quota.resource_metric == "object" + + +def test_get_obj_storage_quota_usage(test_linode_client): + quota_id = "obj-objects-us-ord-1.linodeobjects.com" + quota = test_linode_client.load(ObjectStorageQuota, quota_id) + + quota_usage = quota.usage() + + assert isinstance(quota_usage, ObjectStorageQuotaUsage) + assert quota_usage.quota_limit == 100000000 + assert quota_usage.usage >= 0 diff --git a/test/unit/objects/object_storage_test.py b/test/unit/objects/object_storage_test.py index 396813b3d..b7ff7e49c 100644 --- a/test/unit/objects/object_storage_test.py +++ b/test/unit/objects/object_storage_test.py @@ -6,6 +6,7 @@ ObjectStorageACL, ObjectStorageBucket, ObjectStorageCluster, + ObjectStorageQuota, ) @@ -284,3 +285,53 @@ def test_object_acl_config_update(self): "name": "example", }, ) + + def test_quota_get_and_list(self): + """ + Test that you can get and list an Object storage quota and usage information. 
+ """ + quota = ObjectStorageQuota( + self.client, + "obj-objects-us-ord-1", + ) + + self.assertIsNotNone(quota) + self.assertEqual(quota.quota_id, "obj-objects-us-ord-1") + self.assertEqual(quota.quota_name, "Object Storage Maximum Objects") + self.assertEqual( + quota.description, + "Maximum number of Objects this customer is allowed to have on this endpoint.", + ) + self.assertEqual(quota.endpoint_type, "E1") + self.assertEqual(quota.s3_endpoint, "us-iad-1.linodeobjects.com") + self.assertEqual(quota.quota_limit, 50) + self.assertEqual(quota.resource_metric, "object") + + quota_usage_url = "/object-storage/quotas/obj-objects-us-ord-1/usage" + with self.mock_get(quota_usage_url) as m: + usage = quota.usage() + self.assertIsNotNone(usage) + self.assertEqual(m.call_url, quota_usage_url) + self.assertEqual(usage.quota_limit, 100) + self.assertEqual(usage.usage, 10) + + quota_list_url = "/object-storage/quotas" + with self.mock_get(quota_list_url) as m: + quotas = self.client.object_storage.quotas() + self.assertIsNotNone(quotas) + self.assertEqual(m.call_url, quota_list_url) + self.assertEqual(len(quotas), 2) + self.assertEqual(quotas[0].quota_id, "obj-objects-us-ord-1") + self.assertEqual( + quotas[0].quota_name, "Object Storage Maximum Objects" + ) + self.assertEqual( + quotas[0].description, + "Maximum number of Objects this customer is allowed to have on this endpoint.", + ) + self.assertEqual(quotas[0].endpoint_type, "E1") + self.assertEqual( + quotas[0].s3_endpoint, "us-iad-1.linodeobjects.com" + ) + self.assertEqual(quotas[0].quota_limit, 50) + self.assertEqual(quotas[0].resource_metric, "object") From ace528b2bed8957ffb9371e563f78459ced3f718 Mon Sep 17 00:00:00 2001 From: Youjung Kim <126618609+ykim-akamai@users.noreply.github.com> Date: Mon, 12 May 2025 10:52:43 -0700 Subject: [PATCH 07/34] Update test assertion (#546) --- test/integration/models/lke/test_lke.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/integration/models/lke/test_lke.py b/test/integration/models/lke/test_lke.py index e0a9eafb1..3486485d6 100644 --- a/test/integration/models/lke/test_lke.py +++ b/test/integration/models/lke/test_lke.py @@ -208,7 +208,10 @@ def _to_comparable(p: LKENodePool) -> Dict[str, Any]: assert _to_comparable(cluster.pools[0]) == _to_comparable(pool) - assert pool.disk_encryption == InstanceDiskEncryptionType.disabled + assert pool.disk_encryption in ( + InstanceDiskEncryptionType.enabled, + InstanceDiskEncryptionType.disabled, + ) def test_cluster_dashboard_url_view(lke_cluster): From a8fa1d7ca3bd95fc88c67391fc56992a8d1fccec Mon Sep 17 00:00:00 2001 From: Ye Chen <127243817+yec-akamai@users.noreply.github.com> Date: Mon, 12 May 2025 13:53:36 -0400 Subject: [PATCH 08/34] improve test (#547) --- .../models/object_storage/test_obj_quotas.py | 65 ++++++++----------- 1 file changed, 26 insertions(+), 39 deletions(-) diff --git a/test/integration/models/object_storage/test_obj_quotas.py b/test/integration/models/object_storage/test_obj_quotas.py index b1beade44..10a546bc7 100644 --- a/test/integration/models/object_storage/test_obj_quotas.py +++ b/test/integration/models/object_storage/test_obj_quotas.py @@ -1,58 +1,45 @@ +import pytest + from linode_api4.objects.object_storage import ( ObjectStorageQuota, ObjectStorageQuotaUsage, ) -def test_list_obj_storage_quotas(test_linode_client): +def test_list_and_get_obj_storage_quotas(test_linode_client): quotas = test_linode_client.object_storage.quotas() - target_quota_id = "obj-buckets-us-sea-1.linodeobjects.com" - - 
found_quota = None - for quota in quotas: - if quota.quota_id == target_quota_id: - found_quota = quota - break - - assert ( - found_quota is not None - ), f"Quota with ID {target_quota_id} not found." - - assert found_quota.quota_id == "obj-buckets-us-sea-1.linodeobjects.com" - assert found_quota.quota_name == "max_buckets" - assert found_quota.endpoint_type == "E1" - assert found_quota.s3_endpoint == "us-sea-1.linodeobjects.com" - assert ( - found_quota.description - == "Maximum number of buckets this customer is allowed to have on this endpoint" - ) - assert found_quota.quota_limit == 1000 - assert found_quota.resource_metric == "bucket" + if len(quotas) < 1: + pytest.skip("No available quota for testing. Skipping now...") + found_quota = quotas[0] -def test_get_obj_storage_quota(test_linode_client): - quota_id = "obj-objects-us-ord-1.linodeobjects.com" - quota = test_linode_client.load(ObjectStorageQuota, quota_id) - - assert quota.quota_id == "obj-objects-us-ord-1.linodeobjects.com" - assert quota.quota_name == "max_objects" - assert quota.endpoint_type == "E1" - assert quota.s3_endpoint == "us-ord-1.linodeobjects.com" - assert ( - quota.description - == "Maximum number of objects this customer is allowed to have on this endpoint" + get_quota = test_linode_client.load( + ObjectStorageQuota, found_quota.quota_id ) - assert quota.quota_limit == 100000000 - assert quota.resource_metric == "object" + + assert found_quota.quota_id == get_quota.quota_id + assert found_quota.quota_name == get_quota.quota_name + assert found_quota.endpoint_type == get_quota.endpoint_type + assert found_quota.s3_endpoint == get_quota.s3_endpoint + assert found_quota.description == get_quota.description + assert found_quota.quota_limit == get_quota.quota_limit + assert found_quota.resource_metric == get_quota.resource_metric def test_get_obj_storage_quota_usage(test_linode_client): - quota_id = "obj-objects-us-ord-1.linodeobjects.com" + quotas = test_linode_client.object_storage.quotas() + + if len(quotas) < 1: + pytest.skip("No available quota for testing. Skipping now...") + + quota_id = quotas[0].quota_id quota = test_linode_client.load(ObjectStorageQuota, quota_id) quota_usage = quota.usage() assert isinstance(quota_usage, ObjectStorageQuotaUsage) - assert quota_usage.quota_limit == 100000000 - assert quota_usage.usage >= 0 + assert quota_usage.quota_limit >= 0 + + if quota_usage.usage is not None: + assert quota_usage.usage >= 0 From 40c16306c705155a4901dd92139583ba33c6be55 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 May 2025 10:56:27 -0400 Subject: [PATCH 09/34] build(deps): bump slackapi/slack-github-action from 2.0.0 to 2.1.0 (#548) Bumps [slackapi/slack-github-action](https://github.com/slackapi/slack-github-action) from 2.0.0 to 2.1.0. - [Release notes](https://github.com/slackapi/slack-github-action/releases) - [Commits](https://github.com/slackapi/slack-github-action/compare/v2.0.0...v2.1.0) --- updated-dependencies: - dependency-name: slackapi/slack-github-action dependency-version: 2.1.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 .github/workflows/e2e-test.yml             | 4 ++--
 .github/workflows/nightly-smoke-tests.yml  | 2 +-
 .github/workflows/release-notify-slack.yml | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/e2e-test.yml b/.github/workflows/e2e-test.yml
index c0ccc8e87..d08999645 100644
--- a/.github/workflows/e2e-test.yml
+++ b/.github/workflows/e2e-test.yml
@@ -232,7 +232,7 @@ jobs:
     steps:
       - name: Notify Slack
         id: main_message
-        uses: slackapi/slack-github-action@v2.0.0
+        uses: slackapi/slack-github-action@v2.1.0
         with:
           method: chat.postMessage
           token: ${{ secrets.SLACK_BOT_TOKEN }}
@@ -264,7 +264,7 @@

       - name: Test summary thread
         if: success()
-        uses: slackapi/slack-github-action@v2.0.0
+        uses: slackapi/slack-github-action@v2.1.0
         with:
           method: chat.postMessage
           token: ${{ secrets.SLACK_BOT_TOKEN }}
diff --git a/.github/workflows/nightly-smoke-tests.yml b/.github/workflows/nightly-smoke-tests.yml
index fc48ee010..3f6083a98 100644
--- a/.github/workflows/nightly-smoke-tests.yml
+++ b/.github/workflows/nightly-smoke-tests.yml
@@ -45,7 +45,7 @@ jobs:

       - name: Notify Slack
         if: always() && github.repository == 'linode/linode_api4-python'
-        uses: slackapi/slack-github-action@v2.0.0
+        uses: slackapi/slack-github-action@v2.1.0
         with:
           method: chat.postMessage
           token: ${{ secrets.SLACK_BOT_TOKEN }}
diff --git a/.github/workflows/release-notify-slack.yml b/.github/workflows/release-notify-slack.yml
index ea1a4da68..f2739e988 100644
--- a/.github/workflows/release-notify-slack.yml
+++ b/.github/workflows/release-notify-slack.yml
@@ -11,7 +11,7 @@ jobs:
     steps:
       - name: Notify Slack - Main Message
         id: main_message
-        uses: slackapi/slack-github-action@v2.0.0
+        uses: slackapi/slack-github-action@v2.1.0
         with:
           method: chat.postMessage
           token: ${{ secrets.SLACK_BOT_TOKEN }}

From 3a1ec42866ba04345a98834727d2cc4a3a1b267f Mon Sep 17 00:00:00 2001
From: Ye Chen <127243817+yec-akamai@users.noreply.github.com>
Date: Tue, 20 May 2025 14:49:14 -0400
Subject: [PATCH 10/34] Drop LA and v4beta notice for Limits Visibility (#550)

---
 linode_api4/objects/object_storage.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/linode_api4/objects/object_storage.py b/linode_api4/objects/object_storage.py
index 29eba2b06..a2e61405f 100644
--- a/linode_api4/objects/object_storage.py
+++ b/linode_api4/objects/object_storage.py
@@ -581,7 +581,6 @@ class ObjectStorageKeys(Base):
 class ObjectStorageQuota(Base):
     """
     Object Storage-related quota information on your account.
-    Object Storage Quota related features are under v4beta and may not currently be available to all users.
API documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-quota """ From 032f29408c13d00e1b985f19f82eed7669ce7437 Mon Sep 17 00:00:00 2001 From: Erik Zilber Date: Wed, 21 May 2025 13:42:38 -0400 Subject: [PATCH 11/34] Proj/configurable db params (#553) * Added support for DB Configurable Params (#527) * Added support for DB Configurable Params features * Added unit tests for config endpoints * Added more unit tests and removed config_update methods * Removed stale fields and updated unit tests * Add integration tests * Add integration tests * remove unused var * remove comments and update test case * update test case * remove assertion * Added support for custom JSON field names in dataclasses * Minor integration test fixes * add get and list test cases --------- Co-authored-by: Youjung Kim * Add / prefix to URL in mysql_config_options and postgresql_config_options API calls (#542) * add test coverage for nullable fields (#541) --------- Co-authored-by: Youjung Kim Co-authored-by: Lena Garber <114949949+lgarber-akamai@users.noreply.github.com> Co-authored-by: Youjung Kim <126618609+ykim-akamai@users.noreply.github.com> --- linode_api4/groups/database.py | 54 +- linode_api4/objects/database.py | 151 ++- linode_api4/objects/serializable.py | 18 +- test/fixtures/databases_mysql_config.json | 230 ++++ test/fixtures/databases_mysql_instances.json | 34 +- .../fixtures/databases_postgresql_config.json | 367 +++++ .../databases_postgresql_instances.json | 55 +- test/integration/models/database/helpers.py | 132 ++ .../models/database/test_database.py | 27 +- .../database/test_database_engine_config.py | 475 +++++++ test/unit/groups/database_test.py | 1188 +++++++++++++++++ test/unit/objects/database_test.py | 229 +++- 12 files changed, 2927 insertions(+), 33 deletions(-) create mode 100644 test/fixtures/databases_mysql_config.json create mode 100644 test/fixtures/databases_postgresql_config.json create mode 100644 test/integration/models/database/helpers.py create mode 100644 test/integration/models/database/test_database_engine_config.py diff --git a/linode_api4/groups/database.py b/linode_api4/groups/database.py index 8110ea888..fec3df929 100644 --- a/linode_api4/groups/database.py +++ b/linode_api4/groups/database.py @@ -1,3 +1,9 @@ +from typing import Any, Dict, Union + +from linode_api4 import ( + MySQLDatabaseConfigOptions, + PostgreSQLDatabaseConfigOptions, +) from linode_api4.errors import UnexpectedResponseError from linode_api4.groups import Group from linode_api4.objects import ( @@ -63,6 +69,26 @@ def engines(self, *filters): """ return self.client._get_and_filter(DatabaseEngine, *filters) + def mysql_config_options(self): + """ + Returns a detailed list of all the configuration options for MySQL Databases. + + API Documentation: TODO + + :returns: The JSON configuration options for MySQL Databases. + """ + return self.client.get("/databases/mysql/config", model=self) + + def postgresql_config_options(self): + """ + Returns a detailed list of all the configuration options for PostgreSQL Databases. + + API Documentation: TODO + + :returns: The JSON configuration options for PostgreSQL Databases. + """ + return self.client.get("/databases/postgresql/config", model=self) + def instances(self, *filters): """ Returns a list of Managed Databases active on this account. 
@@ -93,7 +119,15 @@ def mysql_instances(self, *filters): """ return self.client._get_and_filter(MySQLDatabase, *filters) - def mysql_create(self, label, region, engine, ltype, **kwargs): + def mysql_create( + self, + label, + region, + engine, + ltype, + engine_config: Union[MySQLDatabaseConfigOptions, Dict[str, Any]] = None, + **kwargs, + ): """ Creates an :any:`MySQLDatabase` on this account with the given label, region, engine, and node type. For example:: @@ -123,6 +157,8 @@ def mysql_create(self, label, region, engine, ltype, **kwargs): :type engine: str or Engine :param ltype: The Linode Type to use for this cluster :type ltype: str or Type + :param engine_config: The configuration options for this MySQL cluster + :type engine_config: Dict[str, Any] or MySQLDatabaseConfigOptions """ params = { @@ -130,6 +166,7 @@ def mysql_create(self, label, region, engine, ltype, **kwargs): "region": region, "engine": engine, "type": ltype, + "engine_config": engine_config, } params.update(kwargs) @@ -216,7 +253,17 @@ def postgresql_instances(self, *filters): """ return self.client._get_and_filter(PostgreSQLDatabase, *filters) - def postgresql_create(self, label, region, engine, ltype, **kwargs): + def postgresql_create( + self, + label, + region, + engine, + ltype, + engine_config: Union[ + PostgreSQLDatabaseConfigOptions, Dict[str, Any] + ] = None, + **kwargs, + ): """ Creates an :any:`PostgreSQLDatabase` on this account with the given label, region, engine, and node type. For example:: @@ -246,6 +293,8 @@ def postgresql_create(self, label, region, engine, ltype, **kwargs): :type engine: str or Engine :param ltype: The Linode Type to use for this cluster :type ltype: str or Type + :param engine_config: The configuration options for this PostgreSQL cluster + :type engine_config: Dict[str, Any] or PostgreSQLDatabaseConfigOptions """ params = { @@ -253,6 +302,7 @@ def postgresql_create(self, label, region, engine, ltype, **kwargs): "region": region, "engine": engine, "type": ltype, + "engine_config": engine_config, } params.update(kwargs) diff --git a/linode_api4/objects/database.py b/linode_api4/objects/database.py index dc9db8471..39249bbf9 100644 --- a/linode_api4/objects/database.py +++ b/linode_api4/objects/database.py @@ -1,6 +1,15 @@ +from dataclasses import dataclass, field +from typing import Optional + from deprecated import deprecated -from linode_api4.objects import Base, DerivedBase, MappedObject, Property +from linode_api4.objects import ( + Base, + DerivedBase, + JSONObject, + MappedObject, + Property, +) class DatabaseType(Base): @@ -128,6 +137,140 @@ class PostgreSQLDatabaseBackup(DatabaseBackup): api_endpoint = "/databases/postgresql/instances/{database_id}/backups/{id}" +@dataclass +class MySQLDatabaseConfigMySQLOptions(JSONObject): + """ + MySQLDatabaseConfigMySQLOptions represents the fields in the mysql + field of the MySQLDatabaseConfigOptions class + """ + + connect_timeout: Optional[int] = None + default_time_zone: Optional[str] = None + group_concat_max_len: Optional[float] = None + information_schema_stats_expiry: Optional[int] = None + innodb_change_buffer_max_size: Optional[int] = None + innodb_flush_neighbors: Optional[int] = None + innodb_ft_min_token_size: Optional[int] = None + innodb_ft_server_stopword_table: Optional[str] = None + innodb_lock_wait_timeout: Optional[int] = None + innodb_log_buffer_size: Optional[int] = None + innodb_online_alter_log_max_size: Optional[int] = None + innodb_read_io_threads: Optional[int] = None + innodb_rollback_on_timeout: 
Optional[bool] = None
+    innodb_thread_concurrency: Optional[int] = None
+    innodb_write_io_threads: Optional[int] = None
+    interactive_timeout: Optional[int] = None
+    internal_tmp_mem_storage_engine: Optional[str] = None
+    max_allowed_packet: Optional[int] = None
+    max_heap_table_size: Optional[int] = None
+    net_buffer_length: Optional[int] = None
+    net_read_timeout: Optional[int] = None
+    net_write_timeout: Optional[int] = None
+    sort_buffer_size: Optional[int] = None
+    sql_mode: Optional[str] = None
+    sql_require_primary_key: Optional[bool] = None
+    tmp_table_size: Optional[int] = None
+    wait_timeout: Optional[int] = None
+
+
+@dataclass
+class MySQLDatabaseConfigOptions(JSONObject):
+    """
+    MySQLDatabaseConfigOptions is used to specify
+    a MySQL Database Cluster's configuration options during its creation.
+    """
+
+    mysql: Optional[MySQLDatabaseConfigMySQLOptions] = None
+    binlog_retention_period: Optional[int] = None
+
+
+@dataclass
+class PostgreSQLDatabaseConfigPGLookoutOptions(JSONObject):
+    """
+    PostgreSQLDatabaseConfigPGLookoutOptions represents the fields in the pglookout
+    field of the PostgreSQLDatabaseConfigOptions class
+    """
+
+    max_failover_replication_time_lag: Optional[int] = None
+
+
+@dataclass
+class PostgreSQLDatabaseConfigPGOptions(JSONObject):
+    """
+    PostgreSQLDatabaseConfigPGOptions represents the fields in the pg
+    field of the PostgreSQLDatabaseConfigOptions class
+    """
+
+    autovacuum_analyze_scale_factor: Optional[float] = None
+    autovacuum_analyze_threshold: Optional[int] = None
+    autovacuum_max_workers: Optional[int] = None
+    autovacuum_naptime: Optional[int] = None
+    autovacuum_vacuum_cost_delay: Optional[int] = None
+    autovacuum_vacuum_cost_limit: Optional[int] = None
+    autovacuum_vacuum_scale_factor: Optional[float] = None
+    autovacuum_vacuum_threshold: Optional[int] = None
+    bgwriter_delay: Optional[int] = None
+    bgwriter_flush_after: Optional[int] = None
+    bgwriter_lru_maxpages: Optional[int] = None
+    bgwriter_lru_multiplier: Optional[float] = None
+    deadlock_timeout: Optional[int] = None
+    default_toast_compression: Optional[str] = None
+    idle_in_transaction_session_timeout: Optional[int] = None
+    jit: Optional[bool] = None
+    max_files_per_process: Optional[int] = None
+    max_locks_per_transaction: Optional[int] = None
+    max_logical_replication_workers: Optional[int] = None
+    max_parallel_workers: Optional[int] = None
+    max_parallel_workers_per_gather: Optional[int] = None
+    max_pred_locks_per_transaction: Optional[int] = None
+    max_replication_slots: Optional[int] = None
+    max_slot_wal_keep_size: Optional[int] = None
+    max_stack_depth: Optional[int] = None
+    max_standby_archive_delay: Optional[int] = None
+    max_standby_streaming_delay: Optional[int] = None
+    max_wal_senders: Optional[int] = None
+    max_worker_processes: Optional[int] = None
+    password_encryption: Optional[str] = None
+    pg_partman_bgw_interval: Optional[int] = field(
+        default=None, metadata={"json_key": "pg_partman_bgw.interval"}
+    )
+    pg_partman_bgw_role: Optional[str] = field(
+        default=None, metadata={"json_key": "pg_partman_bgw.role"}
+    )
+    pg_stat_monitor_pgsm_enable_query_plan: Optional[bool] = field(
+        default=None,
+        metadata={"json_key": "pg_stat_monitor.pgsm_enable_query_plan"},
+    )
+    pg_stat_monitor_pgsm_max_buckets: Optional[int] = field(
+        default=None, metadata={"json_key": "pg_stat_monitor.pgsm_max_buckets"}
+    )
+    pg_stat_statements_track: Optional[str] = field(
+        default=None, metadata={"json_key": "pg_stat_statements.track"}
+    )
+    temp_file_limit: Optional[int] = 
None + timezone: Optional[str] = None + track_activity_query_size: Optional[int] = None + track_commit_timestamp: Optional[str] = None + track_functions: Optional[str] = None + track_io_timing: Optional[str] = None + wal_sender_timeout: Optional[int] = None + wal_writer_delay: Optional[int] = None + + +@dataclass +class PostgreSQLDatabaseConfigOptions(JSONObject): + """ + PostgreSQLDatabaseConfigOptions is used to specify + a PostgreSQL Database Cluster's configuration options during its creation. + """ + + pg: Optional[PostgreSQLDatabaseConfigPGOptions] = None + pg_stat_monitor_enable: Optional[bool] = None + pglookout: Optional[PostgreSQLDatabaseConfigPGLookoutOptions] = None + shared_buffers_percentage: Optional[float] = None + work_mem: Optional[int] = None + + class MySQLDatabase(Base): """ An accessible Managed MySQL Database. @@ -158,6 +301,9 @@ class MySQLDatabase(Base): "updated": Property(volatile=True, is_datetime=True), "updates": Property(mutable=True), "version": Property(), + "engine_config": Property( + mutable=True, json_object=MySQLDatabaseConfigOptions + ), } @property @@ -321,6 +467,9 @@ class PostgreSQLDatabase(Base): "updated": Property(volatile=True, is_datetime=True), "updates": Property(mutable=True), "version": Property(), + "engine_config": Property( + mutable=True, json_object=PostgreSQLDatabaseConfigOptions + ), } @property diff --git a/linode_api4/objects/serializable.py b/linode_api4/objects/serializable.py index e33179a60..1660795aa 100644 --- a/linode_api4/objects/serializable.py +++ b/linode_api4/objects/serializable.py @@ -148,7 +148,7 @@ def _parse_attr(cls, json_value: Any, field_type: type): @classmethod def from_json(cls, json: Dict[str, Any]) -> Optional["JSONObject"]: """ - Creates an instance of this class from a JSON dict. + Creates an instance of this class from a JSON dict, respecting json_key metadata. 
""" if json is None: return None @@ -157,8 +157,12 @@ def from_json(cls, json: Dict[str, Any]) -> Optional["JSONObject"]: type_hints = get_type_hints(cls) - for k in vars(obj): - setattr(obj, k, cls._parse_attr(json.get(k), type_hints.get(k))) + for f in fields(cls): + json_key = f.metadata.get("json_key", f.name) + field_type = type_hints.get(f.name) + value = json.get(json_key) + parsed_value = cls._parse_attr(value, field_type) + setattr(obj, f.name, parsed_value) return obj @@ -211,7 +215,11 @@ def should_include(key: str, value: Any) -> bool: result = {} - for k, v in vars(self).items(): + for f in fields(self): + k = f.name + json_key = f.metadata.get("json_key", k) + v = getattr(self, k) + if not should_include(k, v): continue @@ -222,7 +230,7 @@ def should_include(key: str, value: Any) -> bool: else: v = attempt_serialize(v) - result[k] = v + result[json_key] = v return result diff --git a/test/fixtures/databases_mysql_config.json b/test/fixtures/databases_mysql_config.json new file mode 100644 index 000000000..9cba0afd4 --- /dev/null +++ b/test/fixtures/databases_mysql_config.json @@ -0,0 +1,230 @@ +{ + "mysql": { + "connect_timeout": { + "description": "The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake", + "example": 10, + "maximum": 3600, + "minimum": 2, + "requires_restart": false, + "type": "integer" + }, + "default_time_zone": { + "description": "Default server time zone as an offset from UTC (from -12:00 to +12:00), a time zone name, or 'SYSTEM' to use the MySQL server default.", + "example": "+03:00", + "maxLength": 100, + "minLength": 2, + "pattern": "^([-+][\\d:]*|[\\w/]*)$", + "requires_restart": false, + "type": "string" + }, + "group_concat_max_len": { + "description": "The maximum permitted result length in bytes for the GROUP_CONCAT() function.", + "example": 1024, + "maximum": 18446744073709551600, + "minimum": 4, + "requires_restart": false, + "type": "integer" + }, + "information_schema_stats_expiry": { + "description": "The time, in seconds, before cached statistics expire", + "example": 86400, + "maximum": 31536000, + "minimum": 900, + "requires_restart": false, + "type": "integer" + }, + "innodb_change_buffer_max_size": { + "description": "Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool. Default is 25", + "example": 30, + "maximum": 50, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "innodb_flush_neighbors": { + "description": "Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent (default is 1): 0 - dirty pages in the same extent are not flushed, 1 - flush contiguous dirty pages in the same extent, 2 - flush dirty pages in the same extent", + "example": 0, + "maximum": 2, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "innodb_ft_min_token_size": { + "description": "Minimum length of words that are stored in an InnoDB FULLTEXT index. 
Changing this parameter will lead to a restart of the MySQL service.", + "example": 3, + "maximum": 16, + "minimum": 0, + "requires_restart": true, + "type": "integer" + }, + "innodb_ft_server_stopword_table": { + "description": "This option is used to specify your own InnoDB FULLTEXT index stopword list for all InnoDB tables.", + "example": "db_name/table_name", + "maxLength": 1024, + "pattern": "^.+/.+$", + "requires_restart": false, + "type": [ + "null", + "string" + ] + }, + "innodb_lock_wait_timeout": { + "description": "The length of time in seconds an InnoDB transaction waits for a row lock before giving up. Default is 120.", + "example": 50, + "maximum": 3600, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "innodb_log_buffer_size": { + "description": "The size in bytes of the buffer that InnoDB uses to write to the log files on disk.", + "example": 16777216, + "maximum": 4294967295, + "minimum": 1048576, + "requires_restart": false, + "type": "integer" + }, + "innodb_online_alter_log_max_size": { + "description": "The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.", + "example": 134217728, + "maximum": 1099511627776, + "minimum": 65536, + "requires_restart": false, + "type": "integer" + }, + "innodb_read_io_threads": { + "description": "The number of I/O threads for read operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", + "example": 10, + "maximum": 64, + "minimum": 1, + "requires_restart": true, + "type": "integer" + }, + "innodb_rollback_on_timeout": { + "description": "When enabled a transaction timeout causes InnoDB to abort and roll back the entire transaction. Changing this parameter will lead to a restart of the MySQL service.", + "example": true, + "requires_restart": true, + "type": "boolean" + }, + "innodb_thread_concurrency": { + "description": "Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit)", + "example": 10, + "maximum": 1000, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "innodb_write_io_threads": { + "description": "The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", + "example": 10, + "maximum": 64, + "minimum": 1, + "requires_restart": true, + "type": "integer" + }, + "interactive_timeout": { + "description": "The number of seconds the server waits for activity on an interactive connection before closing it.", + "example": 3600, + "maximum": 604800, + "minimum": 30, + "requires_restart": false, + "type": "integer" + }, + "internal_tmp_mem_storage_engine": { + "description": "The storage engine for in-memory internal temporary tables.", + "enum": [ + "TempTable", + "MEMORY" + ], + "example": "TempTable", + "requires_restart": false, + "type": "string" + }, + "max_allowed_packet": { + "description": "Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M)", + "example": 67108864, + "maximum": 1073741824, + "minimum": 102400, + "requires_restart": false, + "type": "integer" + }, + "max_heap_table_size": { + "description": "Limits the size of internal in-memory tables. Also set tmp_table_size. 
Default is 16777216 (16M)", + "example": 16777216, + "maximum": 1073741824, + "minimum": 1048576, + "requires_restart": false, + "type": "integer" + }, + "net_buffer_length": { + "description": "Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service.", + "example": 16384, + "maximum": 1048576, + "minimum": 1024, + "requires_restart": true, + "type": "integer" + }, + "net_read_timeout": { + "description": "The number of seconds to wait for more data from a connection before aborting the read.", + "example": 30, + "maximum": 3600, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "net_write_timeout": { + "description": "The number of seconds to wait for a block to be written to a connection before aborting the write.", + "example": 30, + "maximum": 3600, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "sort_buffer_size": { + "description": "Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K)", + "example": 262144, + "maximum": 1073741824, + "minimum": 32768, + "requires_restart": false, + "type": "integer" + }, + "sql_mode": { + "description": "Global SQL mode. Set to empty to use MySQL server defaults. When creating a new service and not setting this field Akamai default SQL mode (strict, SQL standard compliant) will be assigned.", + "example": "ANSI,TRADITIONAL", + "maxLength": 1024, + "pattern": "^[A-Z_]*(,[A-Z_]+)*$", + "requires_restart": false, + "type": "string" + }, + "sql_require_primary_key": { + "description": "Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. It is recommended to always have primary keys because various functionality may break if any large table is missing them.", + "example": true, + "requires_restart": false, + "type": "boolean" + }, + "tmp_table_size": { + "description": "Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M)", + "example": 16777216, + "maximum": 1073741824, + "minimum": 1048576, + "requires_restart": false, + "type": "integer" + }, + "wait_timeout": { + "description": "The number of seconds the server waits for activity on a noninteractive connection before closing it.", + "example": 28800, + "maximum": 2147483, + "minimum": 1, + "requires_restart": false, + "type": "integer" + } + }, + "binlog_retention_period": { + "description": "The minimum amount of time in seconds to keep binlog entries before deletion. 
This may be extended for services that require binlog entries for longer than the default for example if using the MySQL Debezium Kafka connector.", + "example": 600, + "maximum": 86400, + "minimum": 600, + "requires_restart": false, + "type": "integer" + } +} \ No newline at end of file diff --git a/test/fixtures/databases_mysql_instances.json b/test/fixtures/databases_mysql_instances.json index 2ea73ddc2..d6e3f2e64 100644 --- a/test/fixtures/databases_mysql_instances.json +++ b/test/fixtures/databases_mysql_instances.json @@ -29,7 +29,39 @@ "hour_of_day": 0, "week_of_month": null }, - "version": "8.0.26" + "version": "8.0.26", + "engine_config": { + "binlog_retention_period": 600, + "mysql": { + "connect_timeout": 10, + "default_time_zone": "+03:00", + "group_concat_max_len": 1024, + "information_schema_stats_expiry": 86400, + "innodb_change_buffer_max_size": 30, + "innodb_flush_neighbors": 0, + "innodb_ft_min_token_size": 3, + "innodb_ft_server_stopword_table": "db_name/table_name", + "innodb_lock_wait_timeout": 50, + "innodb_log_buffer_size": 16777216, + "innodb_online_alter_log_max_size": 134217728, + "innodb_read_io_threads": 10, + "innodb_rollback_on_timeout": true, + "innodb_thread_concurrency": 10, + "innodb_write_io_threads": 10, + "interactive_timeout": 3600, + "internal_tmp_mem_storage_engine": "TempTable", + "max_allowed_packet": 67108864, + "max_heap_table_size": 16777216, + "net_buffer_length": 16384, + "net_read_timeout": 30, + "net_write_timeout": 30, + "sort_buffer_size": 262144, + "sql_mode": "ANSI,TRADITIONAL", + "sql_require_primary_key": true, + "tmp_table_size": 16777216, + "wait_timeout": 28800 + } + } } ], "page": 1, diff --git a/test/fixtures/databases_postgresql_config.json b/test/fixtures/databases_postgresql_config.json new file mode 100644 index 000000000..9a93d0aa9 --- /dev/null +++ b/test/fixtures/databases_postgresql_config.json @@ -0,0 +1,367 @@ +{ + "pg": { + "autovacuum_analyze_scale_factor": { + "description": "Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size)", + "maximum": 1.0, + "minimum": 0.0, + "requires_restart": false, + "type": "number" + }, + "autovacuum_analyze_threshold": { + "description": "Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is 50 tuples.", + "maximum": 2147483647, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_max_workers": { + "description": "Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is three. This parameter can only be set at server start.", + "maximum": 20, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_naptime": { + "description": "Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds, and the default is one minute", + "maximum": 86400, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_vacuum_cost_delay": { + "description": "Specifies the cost delay value that will be used in automatic VACUUM operations. If -1 is specified, the regular vacuum_cost_delay value will be used. 
The default value is 20 milliseconds", + "maximum": 100, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_vacuum_cost_limit": { + "description": "Specifies the cost limit value that will be used in automatic VACUUM operations. If -1 is specified (which is the default), the regular vacuum_cost_limit value will be used.", + "maximum": 10000, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_vacuum_scale_factor": { + "description": "Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size)", + "maximum": 1.0, + "minimum": 0.0, + "requires_restart": false, + "type": "number" + }, + "autovacuum_vacuum_threshold": { + "description": "Specifies the minimum number of updated or deleted tuples needed to trigger a VACUUM in any one table. The default is 50 tuples", + "maximum": 2147483647, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "bgwriter_delay": { + "description": "Specifies the delay between activity rounds for the background writer in milliseconds. Default is 200.", + "example": 200, + "maximum": 10000, + "minimum": 10, + "requires_restart": false, + "type": "integer" + }, + "bgwriter_flush_after": { + "description": "Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes, default is 512. Setting of 0 disables forced writeback.", + "example": 512, + "maximum": 2048, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "bgwriter_lru_maxpages": { + "description": "In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100.", + "example": 100, + "maximum": 1073741823, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "bgwriter_lru_multiplier": { + "description": "The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a \u201cjust in time\u201d policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. 
The default is 2.0.", + "example": 2.0, + "maximum": 10, + "minimum": 0, + "requires_restart": false, + "type": "number" + }, + "deadlock_timeout": { + "description": "This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.", + "example": 1000, + "maximum": 1800000, + "minimum": 500, + "requires_restart": false, + "type": "integer" + }, + "default_toast_compression": { + "description": "Specifies the default TOAST compression method for values of compressible columns (the default is lz4).", + "enum": [ + "lz4", + "pglz" + ], + "example": "lz4", + "requires_restart": false, + "type": "string" + }, + "idle_in_transaction_session_timeout": { + "description": "Time out sessions with open transactions after this number of milliseconds", + "maximum": 604800000, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "jit": { + "description": "Controls system-wide use of Just-in-Time Compilation (JIT).", + "example": true, + "requires_restart": false, + "type": "boolean" + }, + "max_files_per_process": { + "description": "PostgreSQL maximum number of files that can be open per process", + "maximum": 4096, + "minimum": 1000, + "requires_restart": false, + "type": "integer" + }, + "max_locks_per_transaction": { + "description": "PostgreSQL maximum locks per transaction", + "maximum": 6400, + "minimum": 64, + "requires_restart": false, + "type": "integer" + }, + "max_logical_replication_workers": { + "description": "PostgreSQL maximum logical replication workers (taken from the pool of max_parallel_workers)", + "maximum": 64, + "minimum": 4, + "requires_restart": false, + "type": "integer" + }, + "max_parallel_workers": { + "description": "Sets the maximum number of workers that the system can support for parallel queries", + "maximum": 96, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "max_parallel_workers_per_gather": { + "description": "Sets the maximum number of workers that can be started by a single Gather or Gather Merge node", + "maximum": 96, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "max_pred_locks_per_transaction": { + "description": "PostgreSQL maximum predicate locks per transaction", + "maximum": 5120, + "minimum": 64, + "requires_restart": false, + "type": "integer" + }, + "max_replication_slots": { + "description": "PostgreSQL maximum replication slots", + "maximum": 64, + "minimum": 8, + "requires_restart": false, + "type": "integer" + }, + "max_slot_wal_keep_size": { + "description": "PostgreSQL maximum WAL size (MB) reserved for replication slots. Default is -1 (unlimited). 
wal_keep_size minimum WAL size setting takes precedence over this.", + "maximum": 2147483647, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "max_stack_depth": { + "description": "Maximum depth of the stack in bytes", + "maximum": 6291456, + "minimum": 2097152, + "requires_restart": false, + "type": "integer" + }, + "max_standby_archive_delay": { + "description": "Max standby archive delay in milliseconds", + "maximum": 43200000, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "max_standby_streaming_delay": { + "description": "Max standby streaming delay in milliseconds", + "maximum": 43200000, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "max_wal_senders": { + "description": "PostgreSQL maximum WAL senders", + "maximum": 64, + "minimum": 20, + "requires_restart": false, + "type": "integer" + }, + "max_worker_processes": { + "description": "Sets the maximum number of background processes that the system can support", + "maximum": 96, + "minimum": 8, + "requires_restart": false, + "type": "integer" + }, + "password_encryption": { + "description": "Chooses the algorithm for encrypting passwords.", + "enum": [ + "md5", + "scram-sha-256" + ], + "example": "scram-sha-256", + "requires_restart": false, + "type": [ + "string", + "null" + ] + }, + "pg_partman_bgw.interval": { + "description": "Sets the time interval to run pg_partman's scheduled tasks", + "example": 3600, + "maximum": 604800, + "minimum": 3600, + "requires_restart": false, + "type": "integer" + }, + "pg_partman_bgw.role": { + "description": "Controls which role to use for pg_partman's scheduled background tasks.", + "example": "myrolename", + "maxLength": 64, + "pattern": "^[_A-Za-z0-9][-._A-Za-z0-9]{0,63}$", + "requires_restart": false, + "type": "string" + }, + "pg_stat_monitor.pgsm_enable_query_plan": { + "description": "Enables or disables query plan monitoring", + "example": false, + "requires_restart": false, + "type": "boolean" + }, + "pg_stat_monitor.pgsm_max_buckets": { + "description": "Sets the maximum number of buckets", + "example": 10, + "maximum": 10, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "pg_stat_statements.track": { + "description": "Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. 
The default value is top.", + "enum": [ + "all", + "top", + "none" + ], + "requires_restart": false, + "type": [ + "string" + ] + }, + "temp_file_limit": { + "description": "PostgreSQL temporary file limit in KiB, -1 for unlimited", + "example": 5000000, + "maximum": 2147483647, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "timezone": { + "description": "PostgreSQL service timezone", + "example": "Europe/Helsinki", + "maxLength": 64, + "pattern": "^[\\w/]*$", + "requires_restart": false, + "type": "string" + }, + "track_activity_query_size": { + "description": "Specifies the number of bytes reserved to track the currently executing command for each active session.", + "example": 1024, + "maximum": 10240, + "minimum": 1024, + "requires_restart": false, + "type": "integer" + }, + "track_commit_timestamp": { + "description": "Record commit time of transactions.", + "enum": [ + "off", + "on" + ], + "example": "off", + "requires_restart": false, + "type": "string" + }, + "track_functions": { + "description": "Enables tracking of function call counts and time used.", + "enum": [ + "all", + "pl", + "none" + ], + "requires_restart": false, + "type": "string" + }, + "track_io_timing": { + "description": "Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.", + "enum": [ + "off", + "on" + ], + "example": "off", + "requires_restart": false, + "type": "string" + }, + "wal_sender_timeout": { + "description": "Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.", + "example": 60000, + "requires_restart": false, + "type": "integer" + }, + "wal_writer_delay": { + "description": "WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance", + "example": 50, + "maximum": 200, + "minimum": 10, + "requires_restart": false, + "type": "integer" + } + }, + "pg_stat_monitor_enable": { + "description": "Enable the pg_stat_monitor extension. Enabling this extension will cause the cluster to be restarted. When this extension is enabled, pg_stat_statements results for utility commands are unreliable", + "requires_restart": true, + "type": "boolean" + }, + "pglookout": { + "max_failover_replication_time_lag": { + "description": "Number of seconds of master unavailability before triggering database failover to standby", + "maximum": 9223372036854775000, + "minimum": 10, + "requires_restart": false, + "type": "integer" + } + }, + "shared_buffers_percentage": { + "description": "Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.", + "example": 41.5, + "maximum": 60.0, + "minimum": 20.0, + "requires_restart": false, + "type": "number" + }, + "work_mem": { + "description": "Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. 
Default is 1MB + 0.075% of total RAM (up to 32MB).", + "example": 4, + "maximum": 1024, + "minimum": 1, + "requires_restart": false, + "type": "integer" + } +} \ No newline at end of file diff --git a/test/fixtures/databases_postgresql_instances.json b/test/fixtures/databases_postgresql_instances.json index 2740b836d..92d5ce945 100644 --- a/test/fixtures/databases_postgresql_instances.json +++ b/test/fixtures/databases_postgresql_instances.json @@ -30,7 +30,60 @@ "hour_of_day": 0, "week_of_month": null }, - "version": "13.2" + "version": "13.2", + "engine_config": { + "pg": { + "autovacuum_analyze_scale_factor": 0.5, + "autovacuum_analyze_threshold": 100, + "autovacuum_max_workers": 10, + "autovacuum_naptime": 100, + "autovacuum_vacuum_cost_delay": 50, + "autovacuum_vacuum_cost_limit": 100, + "autovacuum_vacuum_scale_factor": 0.5, + "autovacuum_vacuum_threshold": 100, + "bgwriter_delay": 200, + "bgwriter_flush_after": 512, + "bgwriter_lru_maxpages": 100, + "bgwriter_lru_multiplier": 2.0, + "deadlock_timeout": 1000, + "default_toast_compression": "lz4", + "idle_in_transaction_session_timeout": 100, + "jit": true, + "max_files_per_process": 100, + "max_locks_per_transaction": 100, + "max_logical_replication_workers": 32, + "max_parallel_workers": 64, + "max_parallel_workers_per_gather": 64, + "max_pred_locks_per_transaction": 1000, + "max_replication_slots": 32, + "max_slot_wal_keep_size": 100, + "max_stack_depth": 3507152, + "max_standby_archive_delay": 1000, + "max_standby_streaming_delay": 1000, + "max_wal_senders": 32, + "max_worker_processes": 64, + "password_encryption": "scram-sha-256", + "pg_partman_bgw.interval": 3600, + "pg_partman_bgw.role": "myrolename", + "pg_stat_monitor.pgsm_enable_query_plan": false, + "pg_stat_monitor.pgsm_max_buckets": 10, + "pg_stat_statements.track": "top", + "temp_file_limit": 5000000, + "timezone": "Europe/Helsinki", + "track_activity_query_size": 1024, + "track_commit_timestamp": "off", + "track_functions": "all", + "track_io_timing": "off", + "wal_sender_timeout": 60000, + "wal_writer_delay": 50 + }, + "pg_stat_monitor_enable": true, + "pglookout": { + "max_failover_replication_time_lag": 1000 + }, + "shared_buffers_percentage": 41.5, + "work_mem": 4 + } } ], "page": 1, diff --git a/test/integration/models/database/helpers.py b/test/integration/models/database/helpers.py new file mode 100644 index 000000000..134e7e7c2 --- /dev/null +++ b/test/integration/models/database/helpers.py @@ -0,0 +1,132 @@ +from linode_api4 import LinodeClient +from linode_api4.objects import ( + MySQLDatabase, + MySQLDatabaseConfigMySQLOptions, + MySQLDatabaseConfigOptions, + PostgreSQLDatabase, + PostgreSQLDatabaseConfigOptions, + PostgreSQLDatabaseConfigPGOptions, +) + + +# Test Helpers +def get_db_engine_id(client: LinodeClient, engine: str): + engines = client.database.engines() + engine_id = "" + for e in engines: + if e.engine == engine: + engine_id = e.id + + return str(engine_id) + + +def get_sql_db_status(client: LinodeClient, db_id, status: str): + db = client.load(MySQLDatabase, db_id) + return db.status == status + + +def get_postgres_db_status(client: LinodeClient, db_id, status: str): + db = client.load(PostgreSQLDatabase, db_id) + return db.status == status + + +def make_full_mysql_engine_config(): + return MySQLDatabaseConfigOptions( + binlog_retention_period=600, + mysql=MySQLDatabaseConfigMySQLOptions( + connect_timeout=20, + default_time_zone="+00:00", + group_concat_max_len=1024, + information_schema_stats_expiry=900, + innodb_change_buffer_max_size=25, + 
innodb_flush_neighbors=1, + innodb_ft_min_token_size=3, + innodb_ft_server_stopword_table="db_name/table_name", + innodb_lock_wait_timeout=50, + innodb_log_buffer_size=16777216, + innodb_online_alter_log_max_size=134217728, + innodb_read_io_threads=4, + innodb_rollback_on_timeout=True, + innodb_thread_concurrency=8, + innodb_write_io_threads=4, + interactive_timeout=300, + internal_tmp_mem_storage_engine="TempTable", + max_allowed_packet=67108864, + max_heap_table_size=16777216, + net_buffer_length=16384, + net_read_timeout=30, + net_write_timeout=60, + sort_buffer_size=262144, + sql_mode="TRADITIONAL", + sql_require_primary_key=False, + tmp_table_size=16777216, + wait_timeout=28800, + ), + ) + + +def make_mysql_engine_config_w_nullable_field(): + return MySQLDatabaseConfigOptions( + mysql=MySQLDatabaseConfigMySQLOptions( + innodb_ft_server_stopword_table=None, + ), + ) + + +def make_full_postgres_engine_config(): + return PostgreSQLDatabaseConfigOptions( + pg=PostgreSQLDatabaseConfigPGOptions( + autovacuum_analyze_scale_factor=0.1, + autovacuum_analyze_threshold=50, + autovacuum_max_workers=3, + autovacuum_naptime=60, + autovacuum_vacuum_cost_delay=20, + autovacuum_vacuum_cost_limit=200, + autovacuum_vacuum_scale_factor=0.2, + autovacuum_vacuum_threshold=50, + bgwriter_delay=200, + bgwriter_flush_after=64, + bgwriter_lru_maxpages=100, + bgwriter_lru_multiplier=2.0, + deadlock_timeout=1000, + default_toast_compression="lz4", + idle_in_transaction_session_timeout=600000, + jit=True, + max_files_per_process=1000, + max_locks_per_transaction=64, + max_logical_replication_workers=4, + max_parallel_workers=4, + max_parallel_workers_per_gather=2, + max_pred_locks_per_transaction=64, + max_replication_slots=10, + max_slot_wal_keep_size=2048, + max_stack_depth=6291456, + max_standby_archive_delay=30000, + max_standby_streaming_delay=30000, + max_wal_senders=20, + max_worker_processes=8, + password_encryption="scram-sha-256", + temp_file_limit=1, + timezone="UTC", + track_activity_query_size=2048, + track_functions="all", + wal_sender_timeout=60000, + wal_writer_delay=200, + pg_partman_bgw_interval=3600, + pg_partman_bgw_role="myrolename", + pg_stat_monitor_pgsm_enable_query_plan=True, + pg_stat_monitor_pgsm_max_buckets=2, + pg_stat_statements_track="top", + ), + pg_stat_monitor_enable=True, + shared_buffers_percentage=25.0, + work_mem=1024, + ) + + +def make_postgres_engine_config_w_password_encryption_null(): + return PostgreSQLDatabaseConfigOptions( + pg=PostgreSQLDatabaseConfigPGOptions( + password_encryption=None, + ), + ) diff --git a/test/integration/models/database/test_database.py b/test/integration/models/database/test_database.py index 351c09c2a..dbb763c55 100644 --- a/test/integration/models/database/test_database.py +++ b/test/integration/models/database/test_database.py @@ -5,34 +5,17 @@ send_request_when_resource_available, wait_for_condition, ) +from test.integration.models.database.helpers import ( + get_db_engine_id, + get_postgres_db_status, + get_sql_db_status, +) import pytest -from linode_api4 import LinodeClient from linode_api4.objects import MySQLDatabase, PostgreSQLDatabase -# Test Helpers -def get_db_engine_id(client: LinodeClient, engine: str): - engines = client.database.engines() - engine_id = "" - for e in engines: - if e.engine == engine: - engine_id = e.id - - return str(engine_id) - - -def get_sql_db_status(client: LinodeClient, db_id, status: str): - db = client.load(MySQLDatabase, db_id) - return db.status == status - - -def get_postgres_db_status(client: 
LinodeClient, db_id, status: str):
-    db = client.load(PostgreSQLDatabase, db_id)
-    return db.status == status
-
-
 @pytest.fixture(scope="session")
 def test_create_sql_db(test_linode_client):
     client = test_linode_client
diff --git a/test/integration/models/database/test_database_engine_config.py b/test/integration/models/database/test_database_engine_config.py
new file mode 100644
index 000000000..446281a2d
--- /dev/null
+++ b/test/integration/models/database/test_database_engine_config.py
@@ -0,0 +1,475 @@
+import os
+from test.integration.helpers import (
+    get_test_label,
+    send_request_when_resource_available,
+    wait_for_condition,
+)
+from test.integration.models.database.helpers import (
+    get_db_engine_id,
+    get_postgres_db_status,
+    get_sql_db_status,
+    make_full_mysql_engine_config,
+    make_full_postgres_engine_config,
+    make_mysql_engine_config_w_nullable_field,
+    make_postgres_engine_config_w_password_encryption_null,
+)
+
+import pytest
+
+from linode_api4.errors import ApiError
+from linode_api4.objects import (
+    MySQLDatabase,
+    MySQLDatabaseConfigMySQLOptions,
+    MySQLDatabaseConfigOptions,
+    PostgreSQLDatabase,
+    PostgreSQLDatabaseConfigOptions,
+    PostgreSQLDatabaseConfigPGOptions,
+)
+
+
+@pytest.fixture(scope="session")
+def mysql_db_with_engine_config(test_linode_client):
+    client = test_linode_client
+    label = get_test_label() + "-sqldb"
+    region = "us-ord"
+    engine_id = get_db_engine_id(client, "mysql")
+    dbtype = "g6-standard-1"
+
+    db = client.database.mysql_create(
+        label=label,
+        region=region,
+        engine=engine_id,
+        ltype=dbtype,
+        cluster_size=None,
+        engine_config=make_full_mysql_engine_config(),
+    )
+
+    def get_db_status():
+        return db.status == "active"
+
+    # Usually takes 10-15 minutes to provision
+    wait_for_condition(60, 2000, get_db_status)
+
+    yield db
+
+    send_request_when_resource_available(300, db.delete)
+
+
+@pytest.fixture(scope="session")
+def postgres_db_with_engine_config(test_linode_client):
+    client = test_linode_client
+    label = get_test_label() + "-postgresqldb"
+    region = "us-ord"
+    engine_id = "postgresql/17"
+    dbtype = "g6-standard-1"
+
+    db = client.database.postgresql_create(
+        label=label,
+        region=region,
+        engine=engine_id,
+        ltype=dbtype,
+        cluster_size=None,
+        engine_config=make_full_postgres_engine_config(),
+    )
+
+    def get_db_status():
+        return db.status == "active"
+
+    # Usually takes 10-15 minutes to provision
+    wait_for_condition(60, 2000, get_db_status)
+
+    yield db
+
+    send_request_when_resource_available(300, db.delete)
+
+
+# MYSQL
+@pytest.mark.skipif(
+    os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"},
+    reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)",
+)
+def test_get_mysql_config(test_linode_client):
+    config = test_linode_client.database.mysql_config_options()
+
+    # Top-level keys
+    assert "binlog_retention_period" in config
+    assert "mysql" in config
+
+    # binlog_retention_period checks
+    brp = config["binlog_retention_period"]
+    assert isinstance(brp, dict)
+    assert brp["type"] == "integer"
+    assert brp["minimum"] == 600
+    assert brp["maximum"] == 86400
+    assert brp["requires_restart"] is False
+
+    # mysql sub-keys
+    mysql = config["mysql"]
+
+    # mysql valid fields
+    expected_keys = [
+        "connect_timeout",
+        "default_time_zone",
+        "group_concat_max_len",
+        "information_schema_stats_expiry",
+        "innodb_change_buffer_max_size",
+        "innodb_flush_neighbors",
+        "innodb_ft_min_token_size",
+        "innodb_ft_server_stopword_table",
+        "innodb_lock_wait_timeout",
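+        # NOTE: this list tracks the option schema returned by
+        # mysql_config_options(); options the API adds later should be
+        # appended here so the presence checks below stay exhaustive.
+        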
"innodb_log_buffer_size", + "innodb_online_alter_log_max_size", + "innodb_read_io_threads", + "innodb_rollback_on_timeout", + "innodb_thread_concurrency", + "innodb_write_io_threads", + "interactive_timeout", + "internal_tmp_mem_storage_engine", + "max_allowed_packet", + "max_heap_table_size", + "net_buffer_length", + "net_read_timeout", + "net_write_timeout", + "sort_buffer_size", + "sql_mode", + "sql_require_primary_key", + "tmp_table_size", + "wait_timeout", + ] + + # Assert all valid fields are present + for key in expected_keys: + assert key in mysql, f"{key} not found in mysql config" + + assert mysql["connect_timeout"]["type"] == "integer" + assert mysql["default_time_zone"]["type"] == "string" + assert mysql["innodb_rollback_on_timeout"]["type"] == "boolean" + assert "enum" in mysql["internal_tmp_mem_storage_engine"] + assert "pattern" in mysql["sql_mode"] + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_create_mysql_with_engine_config(mysql_db_with_engine_config): + db = mysql_db_with_engine_config + actual_config = db.engine_config.mysql + expected_config = make_full_mysql_engine_config().mysql.__dict__ + + for key, expected_value in expected_config.items(): + actual_value = getattr(actual_config, key) + assert ( + actual_value == expected_value + ), f"{key} mismatch: expected {expected_value}, got {actual_value}" + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_update_mysql_engine_config( + test_linode_client, mysql_db_with_engine_config +): + db = mysql_db_with_engine_config + + db.updates.day_of_week = 2 + db.engine_config = MySQLDatabaseConfigOptions( + mysql=MySQLDatabaseConfigMySQLOptions(connect_timeout=50), + binlog_retention_period=880, + ) + + db.save() + + wait_for_condition( + 30, + 300, + get_sql_db_status, + test_linode_client, + db.id, + "active", + ) + + database = test_linode_client.load(MySQLDatabase, db.id) + + assert database.updates.day_of_week == 2 + assert database.engine_config.mysql.connect_timeout == 50 + assert database.engine_config.binlog_retention_period == 880 + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_list_mysql_engine_config( + test_linode_client, mysql_db_with_engine_config +): + dbs = test_linode_client.database.mysql_instances() + + db_ids = [db.id for db in dbs] + + assert mysql_db_with_engine_config.id in db_ids + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_mysql_engine_config( + test_linode_client, mysql_db_with_engine_config +): + db = test_linode_client.load(MySQLDatabase, mysql_db_with_engine_config.id) + + assert isinstance(db, MySQLDatabase) + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_create_mysql_db_nullable_field(test_linode_client): + client = test_linode_client + label = get_test_label(5) + "-sqldb" + region = "us-ord" + engine_id = 
get_db_engine_id(client, "mysql") + dbtype = "g6-standard-1" + + db = client.database.mysql_create( + label=label, + region=region, + engine=engine_id, + ltype=dbtype, + cluster_size=None, + engine_config=make_mysql_engine_config_w_nullable_field(), + ) + + assert db.engine_config.mysql.innodb_ft_server_stopword_table is None + + send_request_when_resource_available(300, db.delete) + + +# POSTGRESQL +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_postgres_config(test_linode_client): + config = test_linode_client.database.postgresql_config_options() + + # Top-level keys and structure + assert "pg" in config + + assert "pg_stat_monitor_enable" in config + assert config["pg_stat_monitor_enable"]["type"] == "boolean" + + assert "shared_buffers_percentage" in config + assert config["shared_buffers_percentage"]["type"] == "number" + assert config["shared_buffers_percentage"]["minimum"] >= 1 + + assert "work_mem" in config + assert config["work_mem"]["type"] == "integer" + assert "minimum" in config["work_mem"] + + pg = config["pg"] + + # postgres valid fields + expected_keys = [ + "autovacuum_analyze_scale_factor", + "autovacuum_analyze_threshold", + "autovacuum_max_workers", + "autovacuum_naptime", + "autovacuum_vacuum_cost_delay", + "autovacuum_vacuum_cost_limit", + "autovacuum_vacuum_scale_factor", + "autovacuum_vacuum_threshold", + "bgwriter_delay", + "bgwriter_flush_after", + "bgwriter_lru_maxpages", + "bgwriter_lru_multiplier", + "deadlock_timeout", + "default_toast_compression", + "idle_in_transaction_session_timeout", + "jit", + "max_files_per_process", + "max_locks_per_transaction", + "max_logical_replication_workers", + "max_parallel_workers", + "max_parallel_workers_per_gather", + "max_pred_locks_per_transaction", + "max_replication_slots", + "max_slot_wal_keep_size", + "max_stack_depth", + "max_standby_archive_delay", + "max_standby_streaming_delay", + "max_wal_senders", + "max_worker_processes", + "password_encryption", + "pg_partman_bgw.interval", + "pg_partman_bgw.role", + "pg_stat_monitor.pgsm_enable_query_plan", + "pg_stat_monitor.pgsm_max_buckets", + "pg_stat_statements.track", + "temp_file_limit", + "timezone", + "track_activity_query_size", + "track_commit_timestamp", + "track_functions", + "track_io_timing", + "wal_sender_timeout", + "wal_writer_delay", + ] + + # Assert all valid fields are present + for key in expected_keys: + assert key in pg, f"{key} not found in postgresql config" + + assert pg["autovacuum_analyze_scale_factor"]["type"] == "number" + assert pg["autovacuum_analyze_threshold"]["type"] == "integer" + assert pg["autovacuum_max_workers"]["requires_restart"] is True + assert pg["default_toast_compression"]["enum"] == ["lz4", "pglz"] + assert pg["jit"]["type"] == "boolean" + assert "enum" in pg["password_encryption"] + assert "pattern" in pg["pg_partman_bgw.role"] + assert pg["pg_stat_monitor.pgsm_enable_query_plan"]["type"] == "boolean" + assert pg["pg_stat_monitor.pgsm_max_buckets"]["requires_restart"] is True + assert pg["pg_stat_statements.track"]["enum"] == ["all", "top", "none"] + assert pg["track_commit_timestamp"]["enum"] == ["off", "on"] + assert pg["track_functions"]["enum"] == ["all", "pl", "none"] + assert pg["track_io_timing"]["enum"] == ["off", "on"] + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable 
must be set to 'yes' or 'true' (case insensitive)",
+)
+def test_create_postgres_with_engine_config(
+    test_linode_client, postgres_db_with_engine_config
+):
+    db = postgres_db_with_engine_config
+    actual_config = db.engine_config.pg
+    expected_config = make_full_postgres_engine_config().pg.__dict__
+
+    for key, expected_value in expected_config.items():
+        actual_value = getattr(actual_config, key, None)
+        assert (
+            actual_value is None or actual_value == expected_value
+        ), f"{key} mismatch: expected {expected_value}, got {actual_value}"
+
+
+@pytest.mark.skipif(
+    os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"},
+    reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)",
+)
+def test_update_postgres_engine_config(
+    test_linode_client, postgres_db_with_engine_config
+):
+    db = postgres_db_with_engine_config
+
+    db.updates.day_of_week = 2
+    db.engine_config = PostgreSQLDatabaseConfigOptions(
+        pg=PostgreSQLDatabaseConfigPGOptions(
+            autovacuum_analyze_threshold=70, deadlock_timeout=2000
+        ),
+        shared_buffers_percentage=25.0,
+    )
+
+    db.save()
+
+    wait_for_condition(
+        30,
+        300,
+        get_postgres_db_status,
+        test_linode_client,
+        db.id,
+        "active",
+    )
+
+    database = test_linode_client.load(PostgreSQLDatabase, db.id)
+
+    assert database.updates.day_of_week == 2
+    assert database.engine_config.pg.autovacuum_analyze_threshold == 70
+    assert database.engine_config.pg.deadlock_timeout == 2000
+    assert database.engine_config.shared_buffers_percentage == 25.0
+
+
+@pytest.mark.skipif(
+    os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"},
+    reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)",
+)
+def test_create_pg13_with_lz4_error(test_linode_client):
+    client = test_linode_client
+    label = get_test_label() + "-postgresqldb"
+    region = "us-ord"
+    engine_id = get_db_engine_id(client, "postgresql/13")
+    dbtype = "g6-standard-1"
+
+    # lz4 TOAST compression is only available on PostgreSQL 14+, so this
+    # create request is expected to fail; pytest.raises makes the test fail
+    # loudly (instead of passing vacuously) if no error is returned.
+    with pytest.raises(ApiError) as err:
+        client.database.postgresql_create(
+            label=label,
+            region=region,
+            engine=engine_id,
+            ltype=dbtype,
+            cluster_size=None,
+            engine_config=PostgreSQLDatabaseConfigOptions(
+                pg=PostgreSQLDatabaseConfigPGOptions(
+                    default_toast_compression="lz4"
+                ),
+                work_mem=4,
+            ),
+        )
+
+    assert "An error occurred" in str(err.value.json)
+    assert err.value.status == 500
+
+
+@pytest.mark.skipif(
+    os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"},
+    reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)",
+)
+def test_list_postgres_engine_config(
+    test_linode_client, postgres_db_with_engine_config
+):
+    dbs = test_linode_client.database.postgresql_instances()
+
+    db_ids = [db.id for db in dbs]
+
+    assert postgres_db_with_engine_config.id in db_ids
+
+
+@pytest.mark.skipif(
+    os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"},
+    reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)",
+)
+def test_get_postgres_engine_config(
+    test_linode_client, postgres_db_with_engine_config
+):
+    db = test_linode_client.load(
+        PostgreSQLDatabase, postgres_db_with_engine_config.id
+    )
+
+    assert isinstance(db, PostgreSQLDatabase)
+
+
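The last test in this file checks explicit-null handling for a nullable option. A short sketch of the expectation, assuming the API substitutes its server-side default ("md5") when password_encryption is submitted as null:

    from linode_api4.objects import (
        PostgreSQLDatabaseConfigOptions,
        PostgreSQLDatabaseConfigPGOptions,
    )

    # An explicit None is serialized as JSON null for this nullable field.
    engine_config = PostgreSQLDatabaseConfigOptions(
        pg=PostgreSQLDatabaseConfigPGOptions(password_encryption=None),
    )
    # After creation, the API is expected to report the default back:
    #     db.engine_config.pg.password_encryption == "md5"

+@pytest.mark.skipif(
+    os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"},
+    reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)",
+)
+def test_create_postgres_db_password_encryption_default_md5(test_linode_client):
+    client = test_linode_client
+    label = get_test_label()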
+ "-postgresqldb" + region = "us-ord" + engine_id = "postgresql/17" + dbtype = "g6-standard-1" + + db = client.database.postgresql_create( + label=label, + region=region, + engine=engine_id, + ltype=dbtype, + cluster_size=None, + engine_config=make_postgres_engine_config_w_password_encryption_null(), + ) + + assert db.engine_config.pg.password_encryption == "md5" + + send_request_when_resource_available(300, db.delete) diff --git a/test/unit/groups/database_test.py b/test/unit/groups/database_test.py index 09d842b77..d1939aec7 100644 --- a/test/unit/groups/database_test.py +++ b/test/unit/groups/database_test.py @@ -132,6 +132,1194 @@ def test_create(self): self.assertEqual(m.call_data["type"], "g6-standard-1") self.assertEqual(m.call_data["cluster_size"], 3) + def test_mysql_config_options(self): + """ + Test that MySQL configuration options can be retrieved + """ + + config = self.client.database.mysql_config_options() + + self.assertEqual( + "The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake", + config["mysql"]["connect_timeout"]["description"], + ) + self.assertEqual(10, config["mysql"]["connect_timeout"]["example"]) + self.assertEqual(3600, config["mysql"]["connect_timeout"]["maximum"]) + self.assertEqual(2, config["mysql"]["connect_timeout"]["minimum"]) + self.assertFalse(config["mysql"]["connect_timeout"]["requires_restart"]) + self.assertEqual("integer", config["mysql"]["connect_timeout"]["type"]) + + self.assertEqual( + "Default server time zone as an offset from UTC (from -12:00 to +12:00), a time zone name, or 'SYSTEM' to use the MySQL server default.", + config["mysql"]["default_time_zone"]["description"], + ) + self.assertEqual( + "+03:00", config["mysql"]["default_time_zone"]["example"] + ) + self.assertEqual(100, config["mysql"]["default_time_zone"]["maxLength"]) + self.assertEqual(2, config["mysql"]["default_time_zone"]["minLength"]) + self.assertEqual( + "^([-+][\\d:]*|[\\w/]*)$", + config["mysql"]["default_time_zone"]["pattern"], + ) + self.assertFalse( + config["mysql"]["default_time_zone"]["requires_restart"] + ) + self.assertEqual("string", config["mysql"]["default_time_zone"]["type"]) + + self.assertEqual( + "The maximum permitted result length in bytes for the GROUP_CONCAT() function.", + config["mysql"]["group_concat_max_len"]["description"], + ) + self.assertEqual( + 1024, config["mysql"]["group_concat_max_len"]["example"] + ) + self.assertEqual( + 18446744073709551600, + config["mysql"]["group_concat_max_len"]["maximum"], + ) + self.assertEqual(4, config["mysql"]["group_concat_max_len"]["minimum"]) + self.assertFalse( + config["mysql"]["group_concat_max_len"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["group_concat_max_len"]["type"] + ) + + self.assertEqual( + "The time, in seconds, before cached statistics expire", + config["mysql"]["information_schema_stats_expiry"]["description"], + ) + self.assertEqual( + 86400, config["mysql"]["information_schema_stats_expiry"]["example"] + ) + self.assertEqual( + 31536000, + config["mysql"]["information_schema_stats_expiry"]["maximum"], + ) + self.assertEqual( + 900, config["mysql"]["information_schema_stats_expiry"]["minimum"] + ) + self.assertFalse( + config["mysql"]["information_schema_stats_expiry"][ + "requires_restart" + ] + ) + self.assertEqual( + "integer", + config["mysql"]["information_schema_stats_expiry"]["type"], + ) + + self.assertEqual( + "Maximum size for the InnoDB change buffer, as a percentage of the total size of the 
buffer pool. Default is 25", + config["mysql"]["innodb_change_buffer_max_size"]["description"], + ) + self.assertEqual( + 30, config["mysql"]["innodb_change_buffer_max_size"]["example"] + ) + self.assertEqual( + 50, config["mysql"]["innodb_change_buffer_max_size"]["maximum"] + ) + self.assertEqual( + 0, config["mysql"]["innodb_change_buffer_max_size"]["minimum"] + ) + self.assertFalse( + config["mysql"]["innodb_change_buffer_max_size"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["innodb_change_buffer_max_size"]["type"] + ) + + self.assertEqual( + "Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent (default is 1): 0 - dirty pages in the same extent are not flushed, 1 - flush contiguous dirty pages in the same extent, 2 - flush dirty pages in the same extent", + config["mysql"]["innodb_flush_neighbors"]["description"], + ) + self.assertEqual( + 0, config["mysql"]["innodb_flush_neighbors"]["example"] + ) + self.assertEqual( + 2, config["mysql"]["innodb_flush_neighbors"]["maximum"] + ) + self.assertEqual( + 0, config["mysql"]["innodb_flush_neighbors"]["minimum"] + ) + self.assertFalse( + config["mysql"]["innodb_flush_neighbors"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["innodb_flush_neighbors"]["type"] + ) + + self.assertEqual( + "Minimum length of words that are stored in an InnoDB FULLTEXT index. Changing this parameter will lead to a restart of the MySQL service.", + config["mysql"]["innodb_ft_min_token_size"]["description"], + ) + self.assertEqual( + 3, config["mysql"]["innodb_ft_min_token_size"]["example"] + ) + self.assertEqual( + 16, config["mysql"]["innodb_ft_min_token_size"]["maximum"] + ) + self.assertEqual( + 0, config["mysql"]["innodb_ft_min_token_size"]["minimum"] + ) + self.assertTrue( + config["mysql"]["innodb_ft_min_token_size"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["innodb_ft_min_token_size"]["type"] + ) + + self.assertEqual( + "This option is used to specify your own InnoDB FULLTEXT index stopword list for all InnoDB tables.", + config["mysql"]["innodb_ft_server_stopword_table"]["description"], + ) + self.assertEqual( + "db_name/table_name", + config["mysql"]["innodb_ft_server_stopword_table"]["example"], + ) + self.assertEqual( + 1024, + config["mysql"]["innodb_ft_server_stopword_table"]["maxLength"], + ) + self.assertEqual( + "^.+/.+$", + config["mysql"]["innodb_ft_server_stopword_table"]["pattern"], + ) + self.assertFalse( + config["mysql"]["innodb_ft_server_stopword_table"][ + "requires_restart" + ] + ) + self.assertEqual( + ["null", "string"], + config["mysql"]["innodb_ft_server_stopword_table"]["type"], + ) + + self.assertEqual( + "The length of time in seconds an InnoDB transaction waits for a row lock before giving up. 
Default is 120.", + config["mysql"]["innodb_lock_wait_timeout"]["description"], + ) + self.assertEqual( + 50, config["mysql"]["innodb_lock_wait_timeout"]["example"] + ) + self.assertEqual( + 3600, config["mysql"]["innodb_lock_wait_timeout"]["maximum"] + ) + self.assertEqual( + 1, config["mysql"]["innodb_lock_wait_timeout"]["minimum"] + ) + self.assertFalse( + config["mysql"]["innodb_lock_wait_timeout"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["innodb_lock_wait_timeout"]["type"] + ) + + self.assertEqual( + "The size in bytes of the buffer that InnoDB uses to write to the log files on disk.", + config["mysql"]["innodb_log_buffer_size"]["description"], + ) + self.assertEqual( + 16777216, config["mysql"]["innodb_log_buffer_size"]["example"] + ) + self.assertEqual( + 4294967295, config["mysql"]["innodb_log_buffer_size"]["maximum"] + ) + self.assertEqual( + 1048576, config["mysql"]["innodb_log_buffer_size"]["minimum"] + ) + self.assertFalse( + config["mysql"]["innodb_log_buffer_size"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["innodb_log_buffer_size"]["type"] + ) + + self.assertEqual( + "The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.", + config["mysql"]["innodb_online_alter_log_max_size"]["description"], + ) + self.assertEqual( + 134217728, + config["mysql"]["innodb_online_alter_log_max_size"]["example"], + ) + self.assertEqual( + 1099511627776, + config["mysql"]["innodb_online_alter_log_max_size"]["maximum"], + ) + self.assertEqual( + 65536, + config["mysql"]["innodb_online_alter_log_max_size"]["minimum"], + ) + self.assertFalse( + config["mysql"]["innodb_online_alter_log_max_size"][ + "requires_restart" + ] + ) + self.assertEqual( + "integer", + config["mysql"]["innodb_online_alter_log_max_size"]["type"], + ) + + self.assertEqual( + "The number of I/O threads for read operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", + config["mysql"]["innodb_read_io_threads"]["description"], + ) + self.assertEqual( + 10, config["mysql"]["innodb_read_io_threads"]["example"] + ) + self.assertEqual( + 64, config["mysql"]["innodb_read_io_threads"]["maximum"] + ) + self.assertEqual( + 1, config["mysql"]["innodb_read_io_threads"]["minimum"] + ) + self.assertTrue( + config["mysql"]["innodb_read_io_threads"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["innodb_read_io_threads"]["type"] + ) + + self.assertEqual( + "When enabled a transaction timeout causes InnoDB to abort and roll back the entire transaction. Changing this parameter will lead to a restart of the MySQL service.", + config["mysql"]["innodb_rollback_on_timeout"]["description"], + ) + self.assertTrue( + config["mysql"]["innodb_rollback_on_timeout"]["example"] + ) + self.assertTrue( + config["mysql"]["innodb_rollback_on_timeout"]["requires_restart"] + ) + self.assertEqual( + "boolean", config["mysql"]["innodb_rollback_on_timeout"]["type"] + ) + + self.assertEqual( + "Defines the maximum number of threads permitted inside of InnoDB. 
Default is 0 (infinite concurrency - no limit)", + config["mysql"]["innodb_thread_concurrency"]["description"], + ) + self.assertEqual( + 10, config["mysql"]["innodb_thread_concurrency"]["example"] + ) + self.assertEqual( + 1000, config["mysql"]["innodb_thread_concurrency"]["maximum"] + ) + self.assertEqual( + 0, config["mysql"]["innodb_thread_concurrency"]["minimum"] + ) + self.assertFalse( + config["mysql"]["innodb_thread_concurrency"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["innodb_thread_concurrency"]["type"] + ) + + self.assertEqual( + "The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", + config["mysql"]["innodb_write_io_threads"]["description"], + ) + self.assertEqual( + 10, config["mysql"]["innodb_write_io_threads"]["example"] + ) + self.assertEqual( + 64, config["mysql"]["innodb_write_io_threads"]["maximum"] + ) + self.assertEqual( + 1, config["mysql"]["innodb_write_io_threads"]["minimum"] + ) + self.assertTrue( + config["mysql"]["innodb_write_io_threads"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["innodb_write_io_threads"]["type"] + ) + + self.assertEqual( + "The number of seconds the server waits for activity on an interactive connection before closing it.", + config["mysql"]["interactive_timeout"]["description"], + ) + self.assertEqual( + 3600, config["mysql"]["interactive_timeout"]["example"] + ) + self.assertEqual( + 604800, config["mysql"]["interactive_timeout"]["maximum"] + ) + self.assertEqual(30, config["mysql"]["interactive_timeout"]["minimum"]) + self.assertFalse( + config["mysql"]["interactive_timeout"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["interactive_timeout"]["type"] + ) + + self.assertEqual( + "The storage engine for in-memory internal temporary tables.", + config["mysql"]["internal_tmp_mem_storage_engine"]["description"], + ) + self.assertEqual( + "TempTable", + config["mysql"]["internal_tmp_mem_storage_engine"]["example"], + ) + self.assertEqual( + ["TempTable", "MEMORY"], + config["mysql"]["internal_tmp_mem_storage_engine"]["enum"], + ) + self.assertFalse( + config["mysql"]["internal_tmp_mem_storage_engine"][ + "requires_restart" + ] + ) + self.assertEqual( + "string", config["mysql"]["internal_tmp_mem_storage_engine"]["type"] + ) + + self.assertEqual( + "Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M)", + config["mysql"]["max_allowed_packet"]["description"], + ) + self.assertEqual( + 67108864, config["mysql"]["max_allowed_packet"]["example"] + ) + self.assertEqual( + 1073741824, config["mysql"]["max_allowed_packet"]["maximum"] + ) + self.assertEqual( + 102400, config["mysql"]["max_allowed_packet"]["minimum"] + ) + self.assertFalse( + config["mysql"]["max_allowed_packet"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["max_allowed_packet"]["type"] + ) + + self.assertEqual( + "Limits the size of internal in-memory tables. Also set tmp_table_size. 
Default is 16777216 (16M)", + config["mysql"]["max_heap_table_size"]["description"], + ) + self.assertEqual( + 16777216, config["mysql"]["max_heap_table_size"]["example"] + ) + self.assertEqual( + 1073741824, config["mysql"]["max_heap_table_size"]["maximum"] + ) + self.assertEqual( + 1048576, config["mysql"]["max_heap_table_size"]["minimum"] + ) + self.assertFalse( + config["mysql"]["max_heap_table_size"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["max_heap_table_size"]["type"] + ) + + self.assertEqual( + "Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service.", + config["mysql"]["net_buffer_length"]["description"], + ) + self.assertEqual(16384, config["mysql"]["net_buffer_length"]["example"]) + self.assertEqual( + 1048576, config["mysql"]["net_buffer_length"]["maximum"] + ) + self.assertEqual(1024, config["mysql"]["net_buffer_length"]["minimum"]) + self.assertTrue( + config["mysql"]["net_buffer_length"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["net_buffer_length"]["type"] + ) + + self.assertEqual( + "The number of seconds to wait for more data from a connection before aborting the read.", + config["mysql"]["net_read_timeout"]["description"], + ) + self.assertEqual(30, config["mysql"]["net_read_timeout"]["example"]) + self.assertEqual(3600, config["mysql"]["net_read_timeout"]["maximum"]) + self.assertEqual(1, config["mysql"]["net_read_timeout"]["minimum"]) + self.assertFalse( + config["mysql"]["net_read_timeout"]["requires_restart"] + ) + self.assertEqual("integer", config["mysql"]["net_read_timeout"]["type"]) + + self.assertEqual( + "The number of seconds to wait for a block to be written to a connection before aborting the write.", + config["mysql"]["net_write_timeout"]["description"], + ) + self.assertEqual(30, config["mysql"]["net_write_timeout"]["example"]) + self.assertEqual(3600, config["mysql"]["net_write_timeout"]["maximum"]) + self.assertEqual(1, config["mysql"]["net_write_timeout"]["minimum"]) + self.assertFalse( + config["mysql"]["net_write_timeout"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["net_write_timeout"]["type"] + ) + + self.assertEqual( + "Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K)", + config["mysql"]["sort_buffer_size"]["description"], + ) + self.assertEqual(262144, config["mysql"]["sort_buffer_size"]["example"]) + self.assertEqual( + 1073741824, config["mysql"]["sort_buffer_size"]["maximum"] + ) + self.assertEqual(32768, config["mysql"]["sort_buffer_size"]["minimum"]) + self.assertFalse( + config["mysql"]["sort_buffer_size"]["requires_restart"] + ) + self.assertEqual("integer", config["mysql"]["sort_buffer_size"]["type"]) + + self.assertEqual( + "Global SQL mode. Set to empty to use MySQL server defaults. 
When creating a new service and not setting this field Akamai default SQL mode (strict, SQL standard compliant) will be assigned.", + config["mysql"]["sql_mode"]["description"], + ) + self.assertEqual( + "ANSI,TRADITIONAL", config["mysql"]["sql_mode"]["example"] + ) + self.assertEqual(1024, config["mysql"]["sql_mode"]["maxLength"]) + self.assertEqual( + "^[A-Z_]*(,[A-Z_]+)*$", config["mysql"]["sql_mode"]["pattern"] + ) + self.assertFalse(config["mysql"]["sql_mode"]["requires_restart"]) + self.assertEqual("string", config["mysql"]["sql_mode"]["type"]) + + self.assertEqual( + "Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. It is recommended to always have primary keys because various functionality may break if any large table is missing them.", + config["mysql"]["sql_require_primary_key"]["description"], + ) + self.assertTrue(config["mysql"]["sql_require_primary_key"]["example"]) + self.assertFalse( + config["mysql"]["sql_require_primary_key"]["requires_restart"] + ) + self.assertEqual( + "boolean", config["mysql"]["sql_require_primary_key"]["type"] + ) + + self.assertEqual( + "Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M)", + config["mysql"]["tmp_table_size"]["description"], + ) + self.assertEqual(16777216, config["mysql"]["tmp_table_size"]["example"]) + self.assertEqual( + 1073741824, config["mysql"]["tmp_table_size"]["maximum"] + ) + self.assertEqual(1048576, config["mysql"]["tmp_table_size"]["minimum"]) + self.assertFalse(config["mysql"]["tmp_table_size"]["requires_restart"]) + self.assertEqual("integer", config["mysql"]["tmp_table_size"]["type"]) + + self.assertEqual( + "The number of seconds the server waits for activity on a noninteractive connection before closing it.", + config["mysql"]["wait_timeout"]["description"], + ) + self.assertEqual(28800, config["mysql"]["wait_timeout"]["example"]) + self.assertEqual(2147483, config["mysql"]["wait_timeout"]["maximum"]) + self.assertEqual(1, config["mysql"]["wait_timeout"]["minimum"]) + self.assertFalse(config["mysql"]["wait_timeout"]["requires_restart"]) + self.assertEqual("integer", config["mysql"]["wait_timeout"]["type"]) + + self.assertEqual( + "The minimum amount of time in seconds to keep binlog entries before deletion. This may be extended for services that require binlog entries for longer than the default for example if using the MySQL Debezium Kafka connector.", + config["binlog_retention_period"]["description"], + ) + self.assertEqual(600, config["binlog_retention_period"]["example"]) + self.assertEqual(86400, config["binlog_retention_period"]["maximum"]) + self.assertEqual(600, config["binlog_retention_period"]["minimum"]) + self.assertFalse(config["binlog_retention_period"]["requires_restart"]) + self.assertEqual("integer", config["binlog_retention_period"]["type"]) + + def test_postgresql_config_options(self): + """ + Test that PostgreSQL configuration options can be retrieved + """ + + config = self.client.database.postgresql_config_options() + + self.assertEqual( + "Specifies a fraction of the table size to add to autovacuum_analyze_threshold when " + + "deciding whether to trigger an ANALYZE. 
The default is 0.2 (20% of table size)", + config["pg"]["autovacuum_analyze_scale_factor"]["description"], + ) + self.assertEqual( + 1.0, config["pg"]["autovacuum_analyze_scale_factor"]["maximum"] + ) + self.assertEqual( + 0.0, config["pg"]["autovacuum_analyze_scale_factor"]["minimum"] + ) + self.assertFalse( + config["pg"]["autovacuum_analyze_scale_factor"]["requires_restart"] + ) + self.assertEqual( + "number", config["pg"]["autovacuum_analyze_scale_factor"]["type"] + ) + + self.assertEqual( + "Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is 50 tuples.", + config["pg"]["autovacuum_analyze_threshold"]["description"], + ) + self.assertEqual( + 2147483647, config["pg"]["autovacuum_analyze_threshold"]["maximum"] + ) + self.assertEqual( + 0, config["pg"]["autovacuum_analyze_threshold"]["minimum"] + ) + self.assertFalse( + config["pg"]["autovacuum_analyze_threshold"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["autovacuum_analyze_threshold"]["type"] + ) + + self.assertEqual( + "Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is three. This parameter can only be set at server start.", + config["pg"]["autovacuum_max_workers"]["description"], + ) + self.assertEqual(20, config["pg"]["autovacuum_max_workers"]["maximum"]) + self.assertEqual(1, config["pg"]["autovacuum_max_workers"]["minimum"]) + self.assertFalse( + config["pg"]["autovacuum_max_workers"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["autovacuum_max_workers"]["type"] + ) + + self.assertEqual( + "Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds, and the default is one minute", + config["pg"]["autovacuum_naptime"]["description"], + ) + self.assertEqual(86400, config["pg"]["autovacuum_naptime"]["maximum"]) + self.assertEqual(1, config["pg"]["autovacuum_naptime"]["minimum"]) + self.assertFalse(config["pg"]["autovacuum_naptime"]["requires_restart"]) + self.assertEqual("integer", config["pg"]["autovacuum_naptime"]["type"]) + + self.assertEqual( + "Specifies the cost delay value that will be used in automatic VACUUM operations. If -1 is specified, the regular vacuum_cost_delay value will be used. The default value is 20 milliseconds", + config["pg"]["autovacuum_vacuum_cost_delay"]["description"], + ) + self.assertEqual( + 100, config["pg"]["autovacuum_vacuum_cost_delay"]["maximum"] + ) + self.assertEqual( + -1, config["pg"]["autovacuum_vacuum_cost_delay"]["minimum"] + ) + self.assertFalse( + config["pg"]["autovacuum_vacuum_cost_delay"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["autovacuum_vacuum_cost_delay"]["type"] + ) + + self.assertEqual( + "Specifies the cost limit value that will be used in automatic VACUUM operations. 
If -1 is specified (which is the default), the regular vacuum_cost_limit value will be used.", + config["pg"]["autovacuum_vacuum_cost_limit"]["description"], + ) + self.assertEqual( + 10000, config["pg"]["autovacuum_vacuum_cost_limit"]["maximum"] + ) + self.assertEqual( + -1, config["pg"]["autovacuum_vacuum_cost_limit"]["minimum"] + ) + self.assertFalse( + config["pg"]["autovacuum_vacuum_cost_limit"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["autovacuum_vacuum_cost_limit"]["type"] + ) + + self.assertEqual( + "Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size)", + config["pg"]["autovacuum_vacuum_scale_factor"]["description"], + ) + self.assertEqual( + 1.0, config["pg"]["autovacuum_vacuum_scale_factor"]["maximum"] + ) + self.assertEqual( + 0.0, config["pg"]["autovacuum_vacuum_scale_factor"]["minimum"] + ) + self.assertFalse( + config["pg"]["autovacuum_vacuum_scale_factor"]["requires_restart"] + ) + self.assertEqual( + "number", config["pg"]["autovacuum_vacuum_scale_factor"]["type"] + ) + + self.assertEqual( + "Specifies the minimum number of updated or deleted tuples needed to trigger a VACUUM in any one table. The default is 50 tuples", + config["pg"]["autovacuum_vacuum_threshold"]["description"], + ) + self.assertEqual( + 2147483647, config["pg"]["autovacuum_vacuum_threshold"]["maximum"] + ) + self.assertEqual( + 0, config["pg"]["autovacuum_vacuum_threshold"]["minimum"] + ) + self.assertFalse( + config["pg"]["autovacuum_vacuum_threshold"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["autovacuum_vacuum_threshold"]["type"] + ) + + self.assertEqual( + "Specifies the delay between activity rounds for the background writer in milliseconds. Default is 200.", + config["pg"]["bgwriter_delay"]["description"], + ) + self.assertEqual(200, config["pg"]["bgwriter_delay"]["example"]) + self.assertEqual(10000, config["pg"]["bgwriter_delay"]["maximum"]) + self.assertEqual(10, config["pg"]["bgwriter_delay"]["minimum"]) + self.assertFalse(config["pg"]["bgwriter_delay"]["requires_restart"]) + self.assertEqual("integer", config["pg"]["bgwriter_delay"]["type"]) + + self.assertEqual( + "Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes, default is 512. Setting of 0 disables forced writeback.", + config["pg"]["bgwriter_flush_after"]["description"], + ) + self.assertEqual(512, config["pg"]["bgwriter_flush_after"]["example"]) + self.assertEqual(2048, config["pg"]["bgwriter_flush_after"]["maximum"]) + self.assertEqual(0, config["pg"]["bgwriter_flush_after"]["minimum"]) + self.assertFalse( + config["pg"]["bgwriter_flush_after"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["bgwriter_flush_after"]["type"] + ) + + self.assertEqual( + "In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. 
Default is 100.", + config["pg"]["bgwriter_lru_maxpages"]["description"], + ) + self.assertEqual(100, config["pg"]["bgwriter_lru_maxpages"]["example"]) + self.assertEqual( + 1073741823, config["pg"]["bgwriter_lru_maxpages"]["maximum"] + ) + self.assertEqual(0, config["pg"]["bgwriter_lru_maxpages"]["minimum"]) + self.assertFalse( + config["pg"]["bgwriter_lru_maxpages"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["bgwriter_lru_maxpages"]["type"] + ) + + self.assertEqual( + "The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.", + config["pg"]["bgwriter_lru_multiplier"]["description"], + ) + self.assertEqual( + 2.0, config["pg"]["bgwriter_lru_multiplier"]["example"] + ) + self.assertEqual( + 10.0, config["pg"]["bgwriter_lru_multiplier"]["maximum"] + ) + self.assertEqual( + 0.0, config["pg"]["bgwriter_lru_multiplier"]["minimum"] + ) + self.assertFalse( + config["pg"]["bgwriter_lru_multiplier"]["requires_restart"] + ) + self.assertEqual( + "number", config["pg"]["bgwriter_lru_multiplier"]["type"] + ) + + self.assertEqual( + "This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.", + config["pg"]["deadlock_timeout"]["description"], + ) + self.assertEqual(1000, config["pg"]["deadlock_timeout"]["example"]) + self.assertEqual(1800000, config["pg"]["deadlock_timeout"]["maximum"]) + self.assertEqual(500, config["pg"]["deadlock_timeout"]["minimum"]) + self.assertFalse(config["pg"]["deadlock_timeout"]["requires_restart"]) + self.assertEqual("integer", config["pg"]["deadlock_timeout"]["type"]) + + self.assertEqual( + "Specifies the default TOAST compression method for values of compressible columns (the default is lz4).", + config["pg"]["default_toast_compression"]["description"], + ) + self.assertEqual( + ["lz4", "pglz"], config["pg"]["default_toast_compression"]["enum"] + ) + self.assertEqual( + "lz4", config["pg"]["default_toast_compression"]["example"] + ) + self.assertFalse( + config["pg"]["default_toast_compression"]["requires_restart"] + ) + self.assertEqual( + "string", config["pg"]["default_toast_compression"]["type"] + ) + + self.assertEqual( + "Time out sessions with open transactions after this number of milliseconds", + config["pg"]["idle_in_transaction_session_timeout"]["description"], + ) + self.assertEqual( + 604800000, + config["pg"]["idle_in_transaction_session_timeout"]["maximum"], + ) + self.assertEqual( + 0, config["pg"]["idle_in_transaction_session_timeout"]["minimum"] + ) + self.assertFalse( + config["pg"]["idle_in_transaction_session_timeout"][ + "requires_restart" + ] + ) + self.assertEqual( + "integer", + config["pg"]["idle_in_transaction_session_timeout"]["type"], + ) + + self.assertEqual( + "Controls system-wide use of Just-in-Time Compilation (JIT).", + config["pg"]["jit"]["description"], + ) + self.assertTrue(config["pg"]["jit"]["example"]) + self.assertFalse(config["pg"]["jit"]["requires_restart"]) + self.assertEqual("boolean", config["pg"]["jit"]["type"]) + + self.assertEqual( + "PostgreSQL maximum number of files that can be open per process", + 
config["pg"]["max_files_per_process"]["description"], + ) + self.assertEqual(4096, config["pg"]["max_files_per_process"]["maximum"]) + self.assertEqual(1000, config["pg"]["max_files_per_process"]["minimum"]) + self.assertFalse( + config["pg"]["max_files_per_process"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_files_per_process"]["type"] + ) + + self.assertEqual( + "PostgreSQL maximum locks per transaction", + config["pg"]["max_locks_per_transaction"]["description"], + ) + self.assertEqual( + 6400, config["pg"]["max_locks_per_transaction"]["maximum"] + ) + self.assertEqual( + 64, config["pg"]["max_locks_per_transaction"]["minimum"] + ) + self.assertFalse( + config["pg"]["max_locks_per_transaction"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_locks_per_transaction"]["type"] + ) + + self.assertEqual( + "PostgreSQL maximum logical replication workers (taken from the pool of max_parallel_workers)", + config["pg"]["max_logical_replication_workers"]["description"], + ) + self.assertEqual( + 64, config["pg"]["max_logical_replication_workers"]["maximum"] + ) + self.assertEqual( + 4, config["pg"]["max_logical_replication_workers"]["minimum"] + ) + self.assertFalse( + config["pg"]["max_logical_replication_workers"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_logical_replication_workers"]["type"] + ) + + self.assertEqual( + "Sets the maximum number of workers that the system can support for parallel queries", + config["pg"]["max_parallel_workers"]["description"], + ) + self.assertEqual(96, config["pg"]["max_parallel_workers"]["maximum"]) + self.assertEqual(0, config["pg"]["max_parallel_workers"]["minimum"]) + self.assertFalse( + config["pg"]["max_parallel_workers"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_parallel_workers"]["type"] + ) + + self.assertEqual( + "Sets the maximum number of workers that can be started by a single Gather or Gather Merge node", + config["pg"]["max_parallel_workers_per_gather"]["description"], + ) + self.assertEqual( + 96, config["pg"]["max_parallel_workers_per_gather"]["maximum"] + ) + self.assertEqual( + 0, config["pg"]["max_parallel_workers_per_gather"]["minimum"] + ) + self.assertFalse( + config["pg"]["max_parallel_workers_per_gather"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_parallel_workers_per_gather"]["type"] + ) + + self.assertEqual( + "PostgreSQL maximum predicate locks per transaction", + config["pg"]["max_pred_locks_per_transaction"]["description"], + ) + self.assertEqual( + 5120, config["pg"]["max_pred_locks_per_transaction"]["maximum"] + ) + self.assertEqual( + 64, config["pg"]["max_pred_locks_per_transaction"]["minimum"] + ) + self.assertFalse( + config["pg"]["max_pred_locks_per_transaction"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_pred_locks_per_transaction"]["type"] + ) + + self.assertEqual( + "PostgreSQL maximum replication slots", + config["pg"]["max_replication_slots"]["description"], + ) + self.assertEqual(64, config["pg"]["max_replication_slots"]["maximum"]) + self.assertEqual(8, config["pg"]["max_replication_slots"]["minimum"]) + self.assertFalse( + config["pg"]["max_replication_slots"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_replication_slots"]["type"] + ) + + self.assertEqual( + "PostgreSQL maximum WAL size (MB) reserved for replication slots. Default is -1 (unlimited). 
wal_keep_size minimum WAL size setting takes precedence over this.", + config["pg"]["max_slot_wal_keep_size"]["description"], + ) + self.assertEqual( + 2147483647, config["pg"]["max_slot_wal_keep_size"]["maximum"] + ) + self.assertEqual(-1, config["pg"]["max_slot_wal_keep_size"]["minimum"]) + self.assertFalse( + config["pg"]["max_slot_wal_keep_size"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_slot_wal_keep_size"]["type"] + ) + + self.assertEqual( + "Maximum depth of the stack in bytes", + config["pg"]["max_stack_depth"]["description"], + ) + self.assertEqual(6291456, config["pg"]["max_stack_depth"]["maximum"]) + self.assertEqual(2097152, config["pg"]["max_stack_depth"]["minimum"]) + self.assertFalse(config["pg"]["max_stack_depth"]["requires_restart"]) + self.assertEqual("integer", config["pg"]["max_stack_depth"]["type"]) + + self.assertEqual( + "Max standby archive delay in milliseconds", + config["pg"]["max_standby_archive_delay"]["description"], + ) + self.assertEqual( + 43200000, config["pg"]["max_standby_archive_delay"]["maximum"] + ) + self.assertEqual( + 1, config["pg"]["max_standby_archive_delay"]["minimum"] + ) + self.assertFalse( + config["pg"]["max_standby_archive_delay"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_standby_archive_delay"]["type"] + ) + + self.assertEqual( + "Max standby streaming delay in milliseconds", + config["pg"]["max_standby_streaming_delay"]["description"], + ) + self.assertEqual( + 43200000, config["pg"]["max_standby_streaming_delay"]["maximum"] + ) + self.assertEqual( + 1, config["pg"]["max_standby_streaming_delay"]["minimum"] + ) + self.assertFalse( + config["pg"]["max_standby_streaming_delay"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_standby_streaming_delay"]["type"] + ) + + self.assertEqual( + "PostgreSQL maximum WAL senders", + config["pg"]["max_wal_senders"]["description"], + ) + self.assertEqual(64, config["pg"]["max_wal_senders"]["maximum"]) + self.assertEqual(20, config["pg"]["max_wal_senders"]["minimum"]) + self.assertFalse(config["pg"]["max_wal_senders"]["requires_restart"]) + self.assertEqual("integer", config["pg"]["max_wal_senders"]["type"]) + + self.assertEqual( + "Sets the maximum number of background processes that the system can support", + config["pg"]["max_worker_processes"]["description"], + ) + self.assertEqual(96, config["pg"]["max_worker_processes"]["maximum"]) + self.assertEqual(8, config["pg"]["max_worker_processes"]["minimum"]) + self.assertFalse( + config["pg"]["max_worker_processes"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_worker_processes"]["type"] + ) + + self.assertEqual( + "Chooses the algorithm for encrypting passwords.", + config["pg"]["password_encryption"]["description"], + ) + self.assertEqual( + ["md5", "scram-sha-256"], + config["pg"]["password_encryption"]["enum"], + ) + self.assertEqual( + "scram-sha-256", config["pg"]["password_encryption"]["example"] + ) + self.assertFalse( + config["pg"]["password_encryption"]["requires_restart"] + ) + self.assertEqual( + ["string", "null"], config["pg"]["password_encryption"]["type"] + ) + + self.assertEqual( + "Sets the time interval to run pg_partman's scheduled tasks", + config["pg"]["pg_partman_bgw.interval"]["description"], + ) + self.assertEqual( + 3600, config["pg"]["pg_partman_bgw.interval"]["example"] + ) + self.assertEqual( + 604800, config["pg"]["pg_partman_bgw.interval"]["maximum"] + ) + self.assertEqual( + 3600, 
config["pg"]["pg_partman_bgw.interval"]["minimum"] + ) + self.assertFalse( + config["pg"]["pg_partman_bgw.interval"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["pg_partman_bgw.interval"]["type"] + ) + + self.assertEqual( + "Controls which role to use for pg_partman's scheduled background tasks.", + config["pg"]["pg_partman_bgw.role"]["description"], + ) + self.assertEqual( + "myrolename", config["pg"]["pg_partman_bgw.role"]["example"] + ) + self.assertEqual(64, config["pg"]["pg_partman_bgw.role"]["maxLength"]) + self.assertEqual( + "^[_A-Za-z0-9][-._A-Za-z0-9]{0,63}$", + config["pg"]["pg_partman_bgw.role"]["pattern"], + ) + self.assertFalse( + config["pg"]["pg_partman_bgw.role"]["requires_restart"] + ) + self.assertEqual("string", config["pg"]["pg_partman_bgw.role"]["type"]) + + self.assertEqual( + "Enables or disables query plan monitoring", + config["pg"]["pg_stat_monitor.pgsm_enable_query_plan"][ + "description" + ], + ) + self.assertFalse( + config["pg"]["pg_stat_monitor.pgsm_enable_query_plan"]["example"] + ) + self.assertFalse( + config["pg"]["pg_stat_monitor.pgsm_enable_query_plan"][ + "requires_restart" + ] + ) + self.assertEqual( + "boolean", + config["pg"]["pg_stat_monitor.pgsm_enable_query_plan"]["type"], + ) + + self.assertEqual( + "Sets the maximum number of buckets", + config["pg"]["pg_stat_monitor.pgsm_max_buckets"]["description"], + ) + self.assertEqual( + 10, config["pg"]["pg_stat_monitor.pgsm_max_buckets"]["example"] + ) + self.assertEqual( + 10, config["pg"]["pg_stat_monitor.pgsm_max_buckets"]["maximum"] + ) + self.assertEqual( + 1, config["pg"]["pg_stat_monitor.pgsm_max_buckets"]["minimum"] + ) + self.assertFalse( + config["pg"]["pg_stat_monitor.pgsm_max_buckets"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["pg_stat_monitor.pgsm_max_buckets"]["type"] + ) + + self.assertEqual( + "Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. 
The default value is top.", + config["pg"]["pg_stat_statements.track"]["description"], + ) + self.assertEqual( + ["all", "top", "none"], + config["pg"]["pg_stat_statements.track"]["enum"], + ) + self.assertFalse( + config["pg"]["pg_stat_statements.track"]["requires_restart"] + ) + self.assertEqual( + ["string"], config["pg"]["pg_stat_statements.track"]["type"] + ) + + self.assertEqual( + "PostgreSQL temporary file limit in KiB, -1 for unlimited", + config["pg"]["temp_file_limit"]["description"], + ) + self.assertEqual(5000000, config["pg"]["temp_file_limit"]["example"]) + self.assertEqual(2147483647, config["pg"]["temp_file_limit"]["maximum"]) + self.assertEqual(-1, config["pg"]["temp_file_limit"]["minimum"]) + self.assertFalse(config["pg"]["temp_file_limit"]["requires_restart"]) + self.assertEqual("integer", config["pg"]["temp_file_limit"]["type"]) + + self.assertEqual( + "PostgreSQL service timezone", + config["pg"]["timezone"]["description"], + ) + self.assertEqual("Europe/Helsinki", config["pg"]["timezone"]["example"]) + self.assertEqual(64, config["pg"]["timezone"]["maxLength"]) + self.assertEqual("^[\\w/]*$", config["pg"]["timezone"]["pattern"]) + self.assertFalse(config["pg"]["timezone"]["requires_restart"]) + self.assertEqual("string", config["pg"]["timezone"]["type"]) + + self.assertEqual( + "Specifies the number of bytes reserved to track the currently executing command for each active session.", + config["pg"]["track_activity_query_size"]["description"], + ) + self.assertEqual( + 1024, config["pg"]["track_activity_query_size"]["example"] + ) + self.assertEqual( + 10240, config["pg"]["track_activity_query_size"]["maximum"] + ) + self.assertEqual( + 1024, config["pg"]["track_activity_query_size"]["minimum"] + ) + self.assertFalse( + config["pg"]["track_activity_query_size"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["track_activity_query_size"]["type"] + ) + + self.assertEqual( + "Record commit time of transactions.", + config["pg"]["track_commit_timestamp"]["description"], + ) + self.assertEqual( + "off", config["pg"]["track_commit_timestamp"]["example"] + ) + self.assertEqual( + ["off", "on"], config["pg"]["track_commit_timestamp"]["enum"] + ) + self.assertFalse( + config["pg"]["track_commit_timestamp"]["requires_restart"] + ) + self.assertEqual( + "string", config["pg"]["track_commit_timestamp"]["type"] + ) + + self.assertEqual( + "Enables tracking of function call counts and time used.", + config["pg"]["track_functions"]["description"], + ) + self.assertEqual( + ["all", "pl", "none"], config["pg"]["track_functions"]["enum"] + ) + self.assertFalse(config["pg"]["track_functions"]["requires_restart"]) + self.assertEqual("string", config["pg"]["track_functions"]["type"]) + + self.assertEqual( + "Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.", + config["pg"]["track_io_timing"]["description"], + ) + self.assertEqual("off", config["pg"]["track_io_timing"]["example"]) + self.assertEqual(["off", "on"], config["pg"]["track_io_timing"]["enum"]) + self.assertFalse(config["pg"]["track_io_timing"]["requires_restart"]) + self.assertEqual("string", config["pg"]["track_io_timing"]["type"]) + + self.assertEqual( + "Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. 
Setting this value to zero disables the timeout.", + config["pg"]["wal_sender_timeout"]["description"], + ) + self.assertEqual(60000, config["pg"]["wal_sender_timeout"]["example"]) + self.assertFalse(config["pg"]["wal_sender_timeout"]["requires_restart"]) + self.assertEqual("integer", config["pg"]["wal_sender_timeout"]["type"]) + + self.assertEqual( + "WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance", + config["pg"]["wal_writer_delay"]["description"], + ) + self.assertEqual(50, config["pg"]["wal_writer_delay"]["example"]) + self.assertEqual(200, config["pg"]["wal_writer_delay"]["maximum"]) + self.assertEqual(10, config["pg"]["wal_writer_delay"]["minimum"]) + self.assertFalse(config["pg"]["wal_writer_delay"]["requires_restart"]) + self.assertEqual("integer", config["pg"]["wal_writer_delay"]["type"]) + + self.assertEqual( + "Enable the pg_stat_monitor extension. Enabling this extension will cause the cluster to be restarted. When this extension is enabled, pg_stat_statements results for utility commands are unreliable", + config["pg_stat_monitor_enable"]["description"], + ) + self.assertTrue(config["pg_stat_monitor_enable"]["requires_restart"]) + self.assertEqual("boolean", config["pg_stat_monitor_enable"]["type"]) + + self.assertEqual( + "Number of seconds of master unavailability before triggering database failover to standby", + config["pglookout"]["max_failover_replication_time_lag"][ + "description" + ], + ) + self.assertEqual( + int(9223372036854775000), + config["pglookout"]["max_failover_replication_time_lag"]["maximum"], + ) + self.assertEqual( + int(10), + config["pglookout"]["max_failover_replication_time_lag"]["minimum"], + ) + self.assertFalse( + config["pglookout"]["max_failover_replication_time_lag"][ + "requires_restart" + ] + ) + self.assertEqual( + "integer", + config["pglookout"]["max_failover_replication_time_lag"]["type"], + ) + + self.assertEqual( + "Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.", + config["shared_buffers_percentage"]["description"], + ) + self.assertEqual(41.5, config["shared_buffers_percentage"]["example"]) + self.assertEqual(60.0, config["shared_buffers_percentage"]["maximum"]) + self.assertEqual(20.0, config["shared_buffers_percentage"]["minimum"]) + self.assertFalse( + config["shared_buffers_percentage"]["requires_restart"] + ) + self.assertEqual("number", config["shared_buffers_percentage"]["type"]) + + self.assertEqual( + "Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. 
Default is 1MB + 0.075% of total RAM (up to 32MB).", + config["work_mem"]["description"], + ) + self.assertEqual(4, config["work_mem"]["example"]) + self.assertEqual(1024, config["work_mem"]["maximum"]) + self.assertEqual(1, config["work_mem"]["minimum"]) + self.assertFalse(config["work_mem"]["requires_restart"]) + self.assertEqual("integer", config["work_mem"]["type"]) + class PostgreSQLDatabaseTest(ClientBaseCase): """ diff --git a/test/unit/objects/database_test.py b/test/unit/objects/database_test.py index 51c7de4cd..8605e43c5 100644 --- a/test/unit/objects/database_test.py +++ b/test/unit/objects/database_test.py @@ -1,7 +1,13 @@ import logging from test.unit.base import ClientBaseCase -from linode_api4 import PostgreSQLDatabase +from linode_api4 import ( + MySQLDatabaseConfigMySQLOptions, + MySQLDatabaseConfigOptions, + PostgreSQLDatabase, + PostgreSQLDatabaseConfigOptions, + PostgreSQLDatabaseConfigPGOptions, +) from linode_api4.objects import MySQLDatabase logger = logging.getLogger(__name__) @@ -103,6 +109,59 @@ def test_get_instances(self): self.assertEqual(dbs[0].region, "us-east") self.assertEqual(dbs[0].updates.duration, 3) self.assertEqual(dbs[0].version, "8.0.26") + self.assertEqual(dbs[0].engine_config.binlog_retention_period, 600) + self.assertEqual(dbs[0].engine_config.mysql.connect_timeout, 10) + self.assertEqual(dbs[0].engine_config.mysql.default_time_zone, "+03:00") + self.assertEqual(dbs[0].engine_config.mysql.group_concat_max_len, 1024) + self.assertEqual( + dbs[0].engine_config.mysql.information_schema_stats_expiry, 86400 + ) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_change_buffer_max_size, 30 + ) + self.assertEqual(dbs[0].engine_config.mysql.innodb_flush_neighbors, 0) + self.assertEqual(dbs[0].engine_config.mysql.innodb_ft_min_token_size, 3) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_ft_server_stopword_table, + "db_name/table_name", + ) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_lock_wait_timeout, 50 + ) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_log_buffer_size, 16777216 + ) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_online_alter_log_max_size, + 134217728, + ) + self.assertEqual(dbs[0].engine_config.mysql.innodb_read_io_threads, 10) + self.assertTrue(dbs[0].engine_config.mysql.innodb_rollback_on_timeout) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_thread_concurrency, 10 + ) + self.assertEqual(dbs[0].engine_config.mysql.innodb_write_io_threads, 10) + self.assertEqual(dbs[0].engine_config.mysql.interactive_timeout, 3600) + self.assertEqual( + dbs[0].engine_config.mysql.internal_tmp_mem_storage_engine, + "TempTable", + ) + self.assertEqual( + dbs[0].engine_config.mysql.max_allowed_packet, 67108864 + ) + self.assertEqual( + dbs[0].engine_config.mysql.max_heap_table_size, 16777216 + ) + self.assertEqual(dbs[0].engine_config.mysql.net_buffer_length, 16384) + self.assertEqual(dbs[0].engine_config.mysql.net_read_timeout, 30) + self.assertEqual(dbs[0].engine_config.mysql.net_write_timeout, 30) + self.assertEqual(dbs[0].engine_config.mysql.sort_buffer_size, 262144) + self.assertEqual( + dbs[0].engine_config.mysql.sql_mode, "ANSI,TRADITIONAL" + ) + self.assertTrue(dbs[0].engine_config.mysql.sql_require_primary_key) + self.assertEqual(dbs[0].engine_config.mysql.tmp_table_size, 16777216) + self.assertEqual(dbs[0].engine_config.mysql.wait_timeout, 28800) def test_create(self): """ @@ -121,6 +180,12 @@ def test_create(self): "mysql/8.0.26", "g6-standard-1", cluster_size=3, + 
engine_config=MySQLDatabaseConfigOptions( + mysql=MySQLDatabaseConfigMySQLOptions( + connect_timeout=20 + ), + binlog_retention_period=200, + ), ) except Exception as e: logger.warning( @@ -134,6 +199,12 @@ def test_create(self): self.assertEqual(m.call_data["engine"], "mysql/8.0.26") self.assertEqual(m.call_data["type"], "g6-standard-1") self.assertEqual(m.call_data["cluster_size"], 3) + self.assertEqual( + m.call_data["engine_config"]["mysql"]["connect_timeout"], 20 + ) + self.assertEqual( + m.call_data["engine_config"]["binlog_retention_period"], 200 + ) def test_update(self): """ @@ -148,6 +219,10 @@ def test_update(self): db.updates.day_of_week = 2 db.allow_list = new_allow_list db.label = "cool" + db.engine_config = MySQLDatabaseConfigOptions( + mysql=MySQLDatabaseConfigMySQLOptions(connect_timeout=20), + binlog_retention_period=200, + ) db.save() @@ -156,6 +231,12 @@ def test_update(self): self.assertEqual(m.call_data["label"], "cool") self.assertEqual(m.call_data["updates"]["day_of_week"], 2) self.assertEqual(m.call_data["allow_list"], new_allow_list) + self.assertEqual( + m.call_data["engine_config"]["mysql"]["connect_timeout"], 20 + ) + self.assertEqual( + m.call_data["engine_config"]["binlog_retention_period"], 200 + ) def test_list_backups(self): """ @@ -321,6 +402,97 @@ def test_get_instances(self): self.assertEqual(dbs[0].updates.duration, 3) self.assertEqual(dbs[0].version, "13.2") + print(dbs[0].engine_config.pg.__dict__) + + self.assertTrue(dbs[0].engine_config.pg_stat_monitor_enable) + self.assertEqual( + dbs[0].engine_config.pglookout.max_failover_replication_time_lag, + 1000, + ) + self.assertEqual(dbs[0].engine_config.shared_buffers_percentage, 41.5) + self.assertEqual(dbs[0].engine_config.work_mem, 4) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_analyze_scale_factor, 0.5 + ) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_analyze_threshold, 100 + ) + self.assertEqual(dbs[0].engine_config.pg.autovacuum_max_workers, 10) + self.assertEqual(dbs[0].engine_config.pg.autovacuum_naptime, 100) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_vacuum_cost_delay, 50 + ) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_vacuum_cost_limit, 100 + ) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_vacuum_scale_factor, 0.5 + ) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_vacuum_threshold, 100 + ) + self.assertEqual(dbs[0].engine_config.pg.bgwriter_delay, 200) + self.assertEqual(dbs[0].engine_config.pg.bgwriter_flush_after, 512) + self.assertEqual(dbs[0].engine_config.pg.bgwriter_lru_maxpages, 100) + self.assertEqual(dbs[0].engine_config.pg.bgwriter_lru_multiplier, 2.0) + self.assertEqual(dbs[0].engine_config.pg.deadlock_timeout, 1000) + self.assertEqual( + dbs[0].engine_config.pg.default_toast_compression, "lz4" + ) + self.assertEqual( + dbs[0].engine_config.pg.idle_in_transaction_session_timeout, 100 + ) + self.assertTrue(dbs[0].engine_config.pg.jit) + self.assertEqual(dbs[0].engine_config.pg.max_files_per_process, 100) + self.assertEqual(dbs[0].engine_config.pg.max_locks_per_transaction, 100) + self.assertEqual( + dbs[0].engine_config.pg.max_logical_replication_workers, 32 + ) + self.assertEqual(dbs[0].engine_config.pg.max_parallel_workers, 64) + self.assertEqual( + dbs[0].engine_config.pg.max_parallel_workers_per_gather, 64 + ) + self.assertEqual( + dbs[0].engine_config.pg.max_pred_locks_per_transaction, 1000 + ) + self.assertEqual(dbs[0].engine_config.pg.max_replication_slots, 32) + 
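Illustrative sketch (not part of the patch): the surrounding assertions walk the new nested engine_config structure field by field. Assuming this patch is applied, the same values could be read from a loaded database roughly as follows; the token and database ID below are placeholders:

    from linode_api4 import LinodeClient, PostgreSQLDatabase

    client = LinodeClient("my-personal-access-token")  # placeholder token
    db = client.load(PostgreSQLDatabase, 123)          # placeholder database ID

    # Nested PostgreSQL options live under engine_config.pg ...
    print(db.engine_config.pg.autovacuum_max_workers)
    # ... while service-level options sit directly on engine_config.
    print(db.engine_config.work_mem)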
self.assertEqual(dbs[0].engine_config.pg.max_slot_wal_keep_size, 100) + self.assertEqual(dbs[0].engine_config.pg.max_stack_depth, 3507152) + self.assertEqual( + dbs[0].engine_config.pg.max_standby_archive_delay, 1000 + ) + self.assertEqual( + dbs[0].engine_config.pg.max_standby_streaming_delay, 1000 + ) + self.assertEqual(dbs[0].engine_config.pg.max_wal_senders, 32) + self.assertEqual(dbs[0].engine_config.pg.max_worker_processes, 64) + self.assertEqual( + dbs[0].engine_config.pg.password_encryption, "scram-sha-256" + ) + self.assertEqual(dbs[0].engine_config.pg.pg_partman_bgw_interval, 3600) + self.assertEqual( + dbs[0].engine_config.pg.pg_partman_bgw_role, "myrolename" + ) + self.assertFalse( + dbs[0].engine_config.pg.pg_stat_monitor_pgsm_enable_query_plan + ) + self.assertEqual( + dbs[0].engine_config.pg.pg_stat_monitor_pgsm_max_buckets, 10 + ) + self.assertEqual( + dbs[0].engine_config.pg.pg_stat_statements_track, "top" + ) + self.assertEqual(dbs[0].engine_config.pg.temp_file_limit, 5000000) + self.assertEqual(dbs[0].engine_config.pg.timezone, "Europe/Helsinki") + self.assertEqual( + dbs[0].engine_config.pg.track_activity_query_size, 1024 + ) + self.assertEqual(dbs[0].engine_config.pg.track_commit_timestamp, "off") + self.assertEqual(dbs[0].engine_config.pg.track_functions, "all") + self.assertEqual(dbs[0].engine_config.pg.track_io_timing, "off") + self.assertEqual(dbs[0].engine_config.pg.wal_sender_timeout, 60000) + self.assertEqual(dbs[0].engine_config.pg.wal_writer_delay, 50) + def test_create(self): """ Test that PostgreSQL databases can be created @@ -336,6 +508,17 @@ def test_create(self): "postgresql/13.2", "g6-standard-1", cluster_size=3, + engine_config=PostgreSQLDatabaseConfigOptions( + pg=PostgreSQLDatabaseConfigPGOptions( + autovacuum_analyze_scale_factor=0.5, + pg_partman_bgw_interval=3600, + pg_partman_bgw_role="myrolename", + pg_stat_monitor_pgsm_enable_query_plan=False, + pg_stat_monitor_pgsm_max_buckets=10, + pg_stat_statements_track="top", + ), + work_mem=4, + ), ) except Exception: pass @@ -347,6 +530,37 @@ def test_create(self): self.assertEqual(m.call_data["engine"], "postgresql/13.2") self.assertEqual(m.call_data["type"], "g6-standard-1") self.assertEqual(m.call_data["cluster_size"], 3) + self.assertEqual( + m.call_data["engine_config"]["pg"][ + "autovacuum_analyze_scale_factor" + ], + 0.5, + ) + self.assertEqual( + m.call_data["engine_config"]["pg"]["pg_partman_bgw.interval"], + 3600, + ) + self.assertEqual( + m.call_data["engine_config"]["pg"]["pg_partman_bgw.role"], + "myrolename", + ) + self.assertEqual( + m.call_data["engine_config"]["pg"][ + "pg_stat_monitor.pgsm_enable_query_plan" + ], + False, + ) + self.assertEqual( + m.call_data["engine_config"]["pg"][ + "pg_stat_monitor.pgsm_max_buckets" + ], + 10, + ) + self.assertEqual( + m.call_data["engine_config"]["pg"]["pg_stat_statements.track"], + "top", + ) + self.assertEqual(m.call_data["engine_config"]["work_mem"], 4) def test_update(self): """ @@ -361,6 +575,12 @@ def test_update(self): db.updates.day_of_week = 2 db.allow_list = new_allow_list db.label = "cool" + db.engine_config = PostgreSQLDatabaseConfigOptions( + pg=PostgreSQLDatabaseConfigPGOptions( + autovacuum_analyze_scale_factor=0.5 + ), + work_mem=4, + ) db.save() @@ -369,6 +589,13 @@ def test_update(self): self.assertEqual(m.call_data["label"], "cool") self.assertEqual(m.call_data["updates"]["day_of_week"], 2) self.assertEqual(m.call_data["allow_list"], new_allow_list) + self.assertEqual( + m.call_data["engine_config"]["pg"][ + 
"autovacuum_analyze_scale_factor" + ], + 0.5, + ) + self.assertEqual(m.call_data["engine_config"]["work_mem"], 4) def test_list_backups(self): """ From ff344ddd4f1a96332669b23b241b7a08b2a96be4 Mon Sep 17 00:00:00 2001 From: Zhiwei Liang <121905282+zliang-akamai@users.noreply.github.com> Date: Fri, 23 May 2025 20:46:03 -0400 Subject: [PATCH 12/34] Deprecate `Event.mark_read()` (#551) --- linode_api4/objects/account.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/linode_api4/objects/account.py b/linode_api4/objects/account.py index c7318d871..836f41522 100644 --- a/linode_api4/objects/account.py +++ b/linode_api4/objects/account.py @@ -3,6 +3,7 @@ from datetime import datetime import requests +from deprecated import deprecated from linode_api4.errors import ApiError, UnexpectedResponseError from linode_api4.objects import DATE_FORMAT, Volume @@ -305,6 +306,12 @@ def volume(self): return Volume(self._client, self.entity.id) return None + @deprecated( + reason="`mark_read` API is deprecated. Use the 'mark_seen' " + "API instead. Please note that the `mark_seen` API functions " + "differently and will mark all events up to and including the " + "referenced event-id as 'seen' rather than individual events.", + ) def mark_read(self): """ Marks a single Event as read. From 8ea18d1c760441847a13974b6298c8984582b91e Mon Sep 17 00:00:00 2001 From: Erik Zilber Date: Tue, 27 May 2025 09:07:35 -0400 Subject: [PATCH 13/34] Added missing doc links (#556) --- linode_api4/groups/database.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/linode_api4/groups/database.py b/linode_api4/groups/database.py index fec3df929..9de02ac35 100644 --- a/linode_api4/groups/database.py +++ b/linode_api4/groups/database.py @@ -73,7 +73,7 @@ def mysql_config_options(self): """ Returns a detailed list of all the configuration options for MySQL Databases. - API Documentation: TODO + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-mysql-config :returns: The JSON configuration options for MySQL Databases. """ @@ -83,7 +83,7 @@ def postgresql_config_options(self): """ Returns a detailed list of all the configuration options for PostgreSQL Databases. - API Documentation: TODO + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-postgresql-config :returns: The JSON configuration options for PostgreSQL Databases. """ From fc620df16898ae3ec43316f5ee6ae8f9b95a7459 Mon Sep 17 00:00:00 2001 From: Lena Garber <114949949+lgarber-akamai@users.noreply.github.com> Date: Tue, 27 May 2025 11:13:21 -0400 Subject: [PATCH 14/34] project: UDP NodeBalancers (#549) * Add support for Nodebalancers UDP (#494) * Implemented changes for NodeBalancers UDP * Added unit tests * Fix lint * Fixed issue with cipher_suite in save * Lint * Addressed PR comments * Removed overriden _serialize method * Drop residual prints * Implement _serialize(...) 
override in NodeBalancerConfig (#555) * Add LA notice --------- Co-authored-by: Jacob Riddle <87780794+jriddle-linode@users.noreply.github.com> Co-authored-by: Zhiwei Liang <121905282+zliang-akamai@users.noreply.github.com> Co-authored-by: Ye Chen <127243817+yec-akamai@users.noreply.github.com> Co-authored-by: Youjung Kim <126618609+ykim-1@users.noreply.github.com> Co-authored-by: Erik Zilber Co-authored-by: Youjung Kim <126618609+ykim-akamai@users.noreply.github.com> --- linode_api4/objects/nodebalancer.py | 19 +++ .../nodebalancers_123456_configs.json | 28 ++++- ...ebalancers_123456_configs_65432_nodes.json | 12 +- .../models/nodebalancer/test_nodebalancer.py | 110 +++++++++++++++++- test/unit/objects/nodebalancers_test.py | 20 ++++ 5 files changed, 186 insertions(+), 3 deletions(-) diff --git a/linode_api4/objects/nodebalancer.py b/linode_api4/objects/nodebalancer.py index 840d5b965..f02dda269 100644 --- a/linode_api4/objects/nodebalancer.py +++ b/linode_api4/objects/nodebalancer.py @@ -77,6 +77,8 @@ class NodeBalancerConfig(DerivedBase): The configuration information for a single port of this NodeBalancer. API documentation: https://techdocs.akamai.com/linode-api/reference/get-node-balancer-config + + NOTE: UDP NodeBalancer Configs may not currently be available to all users. """ api_endpoint = "/nodebalancers/{nodebalancer_id}/configs/{id}" @@ -97,6 +99,8 @@ class NodeBalancerConfig(DerivedBase): "check_path": Property(mutable=True), "check_body": Property(mutable=True), "check_passive": Property(mutable=True), + "udp_check_port": Property(mutable=True), + "udp_session_timeout": Property(), "ssl_cert": Property(mutable=True), "ssl_key": Property(mutable=True), "ssl_commonname": Property(), @@ -106,6 +110,20 @@ class NodeBalancerConfig(DerivedBase): "proxy_protocol": Property(mutable=True), } + def _serialize(self, is_put: bool = False): + """ + This override removes the `cipher_suite` field from the PUT request + body on calls to save(...) for UDP configs, which is rejected by + the API. 
+ """ + + result = super()._serialize(is_put) + + if is_put and result["protocol"] == "udp" and "cipher_suite" in result: + del result["cipher_suite"] + + return result + @property def nodes(self): """ @@ -233,6 +251,7 @@ class NodeBalancer(Base): "configs": Property(derived_class=NodeBalancerConfig), "transfer": Property(), "tags": Property(mutable=True, unordered=True), + "client_udp_sess_throttle": Property(mutable=True), } # create derived objects diff --git a/test/fixtures/nodebalancers_123456_configs.json b/test/fixtures/nodebalancers_123456_configs.json index f12f1345f..cab9fb981 100644 --- a/test/fixtures/nodebalancers_123456_configs.json +++ b/test/fixtures/nodebalancers_123456_configs.json @@ -24,9 +24,35 @@ "protocol": "http", "ssl_fingerprint": "", "proxy_protocol": "none" + }, + { + "check": "connection", + "check_attempts": 2, + "stickiness": "table", + "check_interval": 5, + "check_body": "", + "id": 65431, + "check_passive": true, + "algorithm": "roundrobin", + "check_timeout": 3, + "check_path": "/", + "ssl_cert": null, + "ssl_commonname": "", + "port": 80, + "nodebalancer_id": 123456, + "cipher_suite": "none", + "ssl_key": null, + "nodes_status": { + "up": 0, + "down": 0 + }, + "protocol": "udp", + "ssl_fingerprint": "", + "proxy_protocol": "none", + "udp_check_port": 12345 } ], - "results": 1, + "results": 2, "page": 1, "pages": 1 } diff --git a/test/fixtures/nodebalancers_123456_configs_65432_nodes.json b/test/fixtures/nodebalancers_123456_configs_65432_nodes.json index 658edbb50..f8ffd9edf 100644 --- a/test/fixtures/nodebalancers_123456_configs_65432_nodes.json +++ b/test/fixtures/nodebalancers_123456_configs_65432_nodes.json @@ -9,9 +9,19 @@ "mode": "accept", "config_id": 54321, "nodebalancer_id": 123456 + }, + { + "id": 12345, + "address": "192.168.210.120", + "label": "node12345", + "status": "UP", + "weight": 50, + "mode": "none", + "config_id": 123456, + "nodebalancer_id": 123456 } ], "pages": 1, "page": 1, - "results": 1 + "results": 2 } diff --git a/test/integration/models/nodebalancer/test_nodebalancer.py b/test/integration/models/nodebalancer/test_nodebalancer.py index 21f4d0322..df07de215 100644 --- a/test/integration/models/nodebalancer/test_nodebalancer.py +++ b/test/integration/models/nodebalancer/test_nodebalancer.py @@ -9,7 +9,7 @@ import pytest -from linode_api4 import ApiError, LinodeClient +from linode_api4 import ApiError, LinodeClient, NodeBalancer from linode_api4.objects import ( NodeBalancerConfig, NodeBalancerNode, @@ -64,6 +64,55 @@ def create_nb_config(test_linode_client, e2e_test_firewall): nb.delete() +@pytest.fixture(scope="session") +def create_nb_config_with_udp(test_linode_client, e2e_test_firewall): + client = test_linode_client + label = get_test_label(8) + + nb = client.nodebalancer_create( + region=TEST_REGION, label=label, firewall=e2e_test_firewall.id + ) + + config = nb.config_create(protocol="udp", udp_check_port=1234) + + yield config + + config.delete() + nb.delete() + + +@pytest.fixture(scope="session") +def create_nb(test_linode_client, e2e_test_firewall): + client = test_linode_client + label = get_test_label(8) + + nb = client.nodebalancer_create( + region=TEST_REGION, label=label, firewall=e2e_test_firewall.id + ) + + yield nb + + nb.delete() + + +def test_create_nb(test_linode_client, e2e_test_firewall): + client = test_linode_client + label = get_test_label(8) + + nb = client.nodebalancer_create( + region=TEST_REGION, + label=label, + firewall=e2e_test_firewall.id, + client_udp_sess_throttle=5, + ) + + assert 
TEST_REGION, nb.region + assert label == nb.label + assert 5 == nb.client_udp_sess_throttle + + nb.delete() + + def test_get_nodebalancer_config(test_linode_client, create_nb_config): config = test_linode_client.load( NodeBalancerConfig, @@ -72,6 +121,65 @@ def test_get_nodebalancer_config(test_linode_client, create_nb_config): ) +def test_get_nb_config_with_udp(test_linode_client, create_nb_config_with_udp): + config = test_linode_client.load( + NodeBalancerConfig, + create_nb_config_with_udp.id, + create_nb_config_with_udp.nodebalancer_id, + ) + + assert "udp" == config.protocol + assert 1234 == config.udp_check_port + assert 16 == config.udp_session_timeout + + +def test_update_nb_config(test_linode_client, create_nb_config_with_udp): + config = test_linode_client.load( + NodeBalancerConfig, + create_nb_config_with_udp.id, + create_nb_config_with_udp.nodebalancer_id, + ) + + config.udp_check_port = 4321 + config.save() + + config_updated = test_linode_client.load( + NodeBalancerConfig, + create_nb_config_with_udp.id, + create_nb_config_with_udp.nodebalancer_id, + ) + + assert 4321 == config_updated.udp_check_port + + +def test_get_nb(test_linode_client, create_nb): + nb = test_linode_client.load( + NodeBalancer, + create_nb.id, + ) + + assert nb.id == create_nb.id + + +def test_update_nb(test_linode_client, create_nb): + nb = test_linode_client.load( + NodeBalancer, + create_nb.id, + ) + + nb.label = "ThisNewLabel" + nb.client_udp_sess_throttle = 5 + nb.save() + + nb_updated = test_linode_client.load( + NodeBalancer, + create_nb.id, + ) + + assert "ThisNewLabel" == nb_updated.label + assert 5 == nb_updated.client_udp_sess_throttle + + @pytest.mark.smoke def test_create_nb_node( test_linode_client, create_nb_config, linode_with_private_ip diff --git a/test/unit/objects/nodebalancers_test.py b/test/unit/objects/nodebalancers_test.py index 05f0ad7de..ed0f0c320 100644 --- a/test/unit/objects/nodebalancers_test.py +++ b/test/unit/objects/nodebalancers_test.py @@ -42,6 +42,23 @@ def test_get_config(self): self.assertEqual(config.ssl_fingerprint, "") self.assertEqual(config.proxy_protocol, "none") + config_udp = NodeBalancerConfig(self.client, 65431, 123456) + self.assertEqual(config_udp.protocol, "udp") + self.assertEqual(config_udp.udp_check_port, 12345) + + def test_update_config_udp(self): + """ + Tests that a config with a protocol of udp can be updated and that cipher suite is properly excluded in save() + """ + with self.mock_put("nodebalancers/123456/configs/65431") as m: + config = self.client.load(NodeBalancerConfig, 65431, 123456) + config.udp_check_port = 54321 + config.save() + + self.assertEqual(m.call_url, "/nodebalancers/123456/configs/65431") + self.assertEqual(m.call_data["udp_check_port"], 54321) + self.assertNotIn("cipher_suite", m.call_data) + class NodeBalancerNodeTest(ClientBaseCase): """ @@ -66,6 +83,9 @@ def test_get_node(self): self.assertEqual(node.config_id, 65432) self.assertEqual(node.nodebalancer_id, 123456) + node_udp = NodeBalancerNode(self.client, 12345, (65432, 123456)) + self.assertEqual(node_udp.mode, "none") + def test_create_node(self): """ Tests that a node can be created From 0b1b2af4f64ac0e4557029b82ba966b394b5c2ca Mon Sep 17 00:00:00 2001 From: pmajali Date: Mon, 2 Jun 2025 23:37:26 +0530 Subject: [PATCH 15/34] Adding SDK changes for ACLP APIs (#528) * adding monitor APIs * updating doc * updating tests * updating lint errors * Updating method name Co-authored-by: Lena Garber <114949949+lgarber-akamai@users.noreply.github.com> * updating code with 
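Condensing the UDP NodeBalancer tests above into an illustrative sketch (not part of the patch; the token, region, and port values are placeholders):

    from linode_api4 import LinodeClient

    client = LinodeClient("my-personal-access-token")  # placeholder token

    nb = client.nodebalancer_create(
        region="us-east",            # placeholder region
        label="example-udp-nb",
        client_udp_sess_throttle=5,  # new top-level UDP throttle field
    )
    config = nb.config_create(protocol="udp", udp_check_port=1234)

    # Updating a UDP config: per the new _serialize override, cipher_suite
    # is dropped from the PUT body so the API accepts the request.
    config.udp_check_port = 4321
    config.save()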
review comments * sorting imports * updating with make format changes * updating with review comments * review updates * updating docstring * updating func names * PR comments * review comments * adding __all__ module * updating unittest and entity_id to any --------- Co-authored-by: Lena Garber <114949949+lgarber-akamai@users.noreply.github.com> --- linode_api4/groups/__init__.py | 1 + linode_api4/groups/monitor.py | 153 +++++++++++++++ linode_api4/linode_client.py | 3 + linode_api4/objects/__init__.py | 1 + linode_api4/objects/monitor.py | 180 ++++++++++++++++++ test/fixtures/monitor_dashboards.json | 37 ++++ test/fixtures/monitor_dashboards_1.json | 30 +++ test/fixtures/monitor_services.json | 11 ++ test/fixtures/monitor_services_dbaas.json | 11 ++ .../monitor_services_dbaas_dashboards.json | 37 ++++ ...tor_services_dbaas_metric-definitions.json | 55 ++++++ .../monitor_services_dbaas_token.json | 3 + .../monitor_services_linode_token.json | 3 + .../models/monitor/test_monitor.py | 109 +++++++++++ test/unit/objects/monitor_test.py | 123 ++++++++++++ 15 files changed, 757 insertions(+) create mode 100644 linode_api4/groups/monitor.py create mode 100644 linode_api4/objects/monitor.py create mode 100644 test/fixtures/monitor_dashboards.json create mode 100644 test/fixtures/monitor_dashboards_1.json create mode 100644 test/fixtures/monitor_services.json create mode 100644 test/fixtures/monitor_services_dbaas.json create mode 100644 test/fixtures/monitor_services_dbaas_dashboards.json create mode 100644 test/fixtures/monitor_services_dbaas_metric-definitions.json create mode 100644 test/fixtures/monitor_services_dbaas_token.json create mode 100644 test/fixtures/monitor_services_linode_token.json create mode 100644 test/integration/models/monitor/test_monitor.py create mode 100644 test/unit/objects/monitor_test.py diff --git a/linode_api4/groups/__init__.py b/linode_api4/groups/__init__.py index e50eeab66..3842042ad 100644 --- a/linode_api4/groups/__init__.py +++ b/linode_api4/groups/__init__.py @@ -10,6 +10,7 @@ from .lke import * from .lke_tier import * from .longview import * +from .monitor import * from .networking import * from .nodebalancer import * from .object_storage import * diff --git a/linode_api4/groups/monitor.py b/linode_api4/groups/monitor.py new file mode 100644 index 000000000..908b4e819 --- /dev/null +++ b/linode_api4/groups/monitor.py @@ -0,0 +1,153 @@ +__all__ = [ + "MonitorGroup", +] +from typing import Any, Optional + +from linode_api4 import ( + PaginatedList, +) +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import Group +from linode_api4.objects import ( + MonitorDashboard, + MonitorMetricsDefinition, + MonitorService, + MonitorServiceToken, +) + + +class MonitorGroup(Group): + """ + Encapsulates Monitor-related methods of the :any:`LinodeClient`. + + This group contains all features beneath the `/monitor` group in the API v4. + """ + + def dashboards( + self, *filters, service_type: Optional[str] = None + ) -> PaginatedList: + """ + Returns a list of dashboards. If `service_type` is provided, it fetches dashboards + for the specific service type. If None, it fetches all dashboards. + + dashboards = client.monitor.dashboards() + dashboard = client.load(MonitorDashboard, 1) + dashboards_by_service = client.monitor.dashboards(service_type="dbaas") + + .. note:: This endpoint is in beta. This will only function if base_url is set to `https://api.linode.com/v4beta`. 
+ + API Documentation: + - All Dashboards: https://techdocs.akamai.com/linode-api/reference/get-dashboards-all + - Dashboards by Service: https://techdocs.akamai.com/linode-api/reference/get-dashboards + + :param service_type: The service type to get dashboards for. + :type service_type: Optional[str] + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Dashboards. + :rtype: PaginatedList of Dashboard + """ + endpoint = ( + f"/monitor/services/{service_type}/dashboards" + if service_type + else "/monitor/dashboards" + ) + + return self.client._get_and_filter( + MonitorDashboard, + *filters, + endpoint=endpoint, + ) + + def services( + self, *filters, service_type: Optional[str] = None + ) -> list[MonitorService]: + """ + Lists services supported by ACLP. + supported_services = client.monitor.services() + service_details = client.monitor.services(service_type="dbaas") + + .. note:: This endpoint is in beta. This will only function if base_url is set to `https://api.linode.com/v4beta`. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-monitor-services + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-monitor-services-for-service-type + + :param service_type: The service type to get details for. + :type service_type: Optional[str] + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: Lists monitor services by a given service_type + :rtype: PaginatedList of the Services + """ + endpoint = ( + f"/monitor/services/{service_type}" + if service_type + else "/monitor/services" + ) + return self.client._get_and_filter( + MonitorService, + *filters, + endpoint=endpoint, + ) + + def metric_definitions( + self, service_type: str, *filters + ) -> list[MonitorMetricsDefinition]: + """ + Returns metrics for a specific service type. + + metrics = client.monitor.list_metric_definitions(service_type="dbaas") + .. note:: This endpoint is in beta. This will only function if base_url is set to `https://api.linode.com/v4beta`. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-monitor-information + + :param service_type: The service type to get metrics for. + :type service_type: str + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: Returns a List of metrics for a service + :rtype: PaginatedList of metrics + """ + return self.client._get_and_filter( + MonitorMetricsDefinition, + *filters, + endpoint=f"/monitor/services/{service_type}/metric-definitions", + ) + + def create_token( + self, service_type: str, entity_ids: list[Any] + ) -> MonitorServiceToken: + """ + Returns a JWE Token for a specific service type. + token = client.monitor.create_token(service_type="dbaas", entity_ids=[1234]) + + .. note:: This endpoint is in beta. This will only function if base_url is set to `https://api.linode.com/v4beta`. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-get-token + + :param service_type: The service type to create token for. + :type service_type: str + :param entity_ids: The list of entity IDs for which the token is valid. 
+ :type entity_ids: any + + :returns: Returns a token for a service + :rtype: str + """ + + params = {"entity_ids": entity_ids} + + result = self.client.post( + f"/monitor/services/{service_type}/token", data=params + ) + + if "token" not in result: + raise UnexpectedResponseError( + "Unexpected response when creating token!", json=result + ) + return MonitorServiceToken(token=result["token"]) diff --git a/linode_api4/linode_client.py b/linode_api4/linode_client.py index 19e6f3900..e71f1563e 100644 --- a/linode_api4/linode_client.py +++ b/linode_api4/linode_client.py @@ -19,6 +19,7 @@ LinodeGroup, LKEGroup, LongviewGroup, + MonitorGroup, NetworkingGroup, NodeBalancerGroup, ObjectStorageGroup, @@ -201,6 +202,8 @@ def __init__( #: Access methods related to VM placement - See :any:`PlacementAPIGroup` for more information. self.placement = PlacementAPIGroup(self) + self.monitor = MonitorGroup(self) + @property def _user_agent(self): return "{}python-linode_api4/{} {}".format( diff --git a/linode_api4/objects/__init__.py b/linode_api4/objects/__init__.py index b13fac51a..7f1542d2a 100644 --- a/linode_api4/objects/__init__.py +++ b/linode_api4/objects/__init__.py @@ -21,3 +21,4 @@ from .vpc import * from .beta import * from .placement import * +from .monitor import * diff --git a/linode_api4/objects/monitor.py b/linode_api4/objects/monitor.py new file mode 100644 index 000000000..f518e641d --- /dev/null +++ b/linode_api4/objects/monitor.py @@ -0,0 +1,180 @@ +__all__ = [ + "MonitorDashboard", + "MonitorMetricsDefinition", + "MonitorService", + "MonitorServiceToken", +] +from dataclasses import dataclass, field +from typing import List, Optional + +from linode_api4.objects import Base, JSONObject, Property, StrEnum + + +class AggregateFunction(StrEnum): + """ + Enum for supported aggregate functions. + """ + + min = "min" + max = "max" + avg = "avg" + sum = "sum" + count = "count" + rate = "rate" + increase = "increase" + last = "last" + + +class ChartType(StrEnum): + """ + Enum for supported chart types. + """ + + line = "line" + area = "area" + + +class ServiceType(StrEnum): + """ + Enum for supported service types. + """ + + dbaas = "dbaas" + linode = "linode" + lke = "lke" + vpc = "vpc" + nodebalancer = "nodebalancer" + firewall = "firewall" + object_storage = "object_storage" + aclb = "aclb" + + +class MetricType(StrEnum): + """ + Enum for supported metric type + """ + + gauge = "gauge" + counter = "counter" + histogram = "histogram" + summary = "summary" + + +class MetricUnit(StrEnum): + """ + Enum for supported metric units. + """ + + COUNT = "count" + PERCENT = "percent" + BYTE = "byte" + SECOND = "second" + BITS_PER_SECOND = "bits_per_second" + MILLISECOND = "millisecond" + KB = "KB" + MB = "MB" + GB = "GB" + RATE = "rate" + BYTES_PER_SECOND = "bytes_per_second" + PERCENTILE = "percentile" + RATIO = "ratio" + OPS_PER_SECOND = "ops_per_second" + IOPS = "iops" + + +class DashboardType(StrEnum): + """ + Enum for supported dashboard types. + """ + + standard = "standard" + custom = "custom" + + +@dataclass +class DashboardWidget(JSONObject): + """ + Represents a single widget in the widgets list. + """ + + metric: str = "" + unit: MetricUnit = "" + label: str = "" + color: str = "" + size: int = 0 + chart_type: ChartType = "" + y_label: str = "" + aggregate_function: AggregateFunction = "" + + +@dataclass +class Dimension(JSONObject): + """ + Represents a single dimension in the dimensions list. 
+ """ + + dimension_label: Optional[str] = None + label: Optional[str] = None + values: Optional[List[str]] = None + + +@dataclass +class MonitorMetricsDefinition(JSONObject): + """ + Represents a single metric definition in the metrics definition list. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-monitor-information + """ + + metric: str = "" + label: str = "" + metric_type: MetricType = "" + unit: MetricUnit = "" + scrape_interval: int = 0 + is_alertable: bool = False + dimensions: Optional[List[Dimension]] = None + available_aggregate_functions: List[AggregateFunction] = field( + default_factory=list + ) + + +class MonitorDashboard(Base): + """ + Dashboard details. + + List dashboards: https://techdocs.akamai.com/linode-api/get-dashboards-all + """ + + api_endpoint = "/monitor/dashboards/{id}" + properties = { + "id": Property(identifier=True), + "created": Property(is_datetime=True), + "label": Property(), + "service_type": Property(ServiceType), + "type": Property(DashboardType), + "widgets": Property(List[DashboardWidget]), + "updated": Property(is_datetime=True), + } + + +@dataclass +class MonitorService(JSONObject): + """ + Represents a single service type. + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-monitor-services + + """ + + service_type: ServiceType = "" + label: str = "" + + +@dataclass +class MonitorServiceToken(JSONObject): + """ + A token for the requested service_type. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-get-token + """ + + token: str = "" diff --git a/test/fixtures/monitor_dashboards.json b/test/fixtures/monitor_dashboards.json new file mode 100644 index 000000000..42de92b55 --- /dev/null +++ b/test/fixtures/monitor_dashboards.json @@ -0,0 +1,37 @@ +{ + "data": [ + { + "created": "2024-10-10T05:01:58", + "id": 1, + "label": "Resource Usage", + "service_type": "dbaas", + "type": "standard", + "updated": "2024-10-10T05:01:58", + "widgets": [ + { + "aggregate_function": "sum", + "chart_type": "area", + "color": "default", + "label": "CPU Usage", + "metric": "cpu_usage", + "size": 12, + "unit": "%", + "y_label": "cpu_usage" + }, + { + "aggregate_function": "sum", + "chart_type": "area", + "color": "default", + "label": "Disk I/O Write", + "metric": "write_iops", + "size": 6, + "unit": "IOPS", + "y_label": "write_iops" + } + ] + } + ], + "page": 1, + "pages": 1, + "results": 1 + } \ No newline at end of file diff --git a/test/fixtures/monitor_dashboards_1.json b/test/fixtures/monitor_dashboards_1.json new file mode 100644 index 000000000..b78bf3447 --- /dev/null +++ b/test/fixtures/monitor_dashboards_1.json @@ -0,0 +1,30 @@ +{ + "created": "2024-10-10T05:01:58", + "id": 1, + "label": "Resource Usage", + "service_type": "dbaas", + "type": "standard", + "updated": "2024-10-10T05:01:58", + "widgets": [ + { + "aggregate_function": "sum", + "chart_type": "area", + "color": "default", + "label": "CPU Usage", + "metric": "cpu_usage", + "size": 12, + "unit": "%", + "y_label": "cpu_usage" + }, + { + "aggregate_function": "sum", + "chart_type": "area", + "color": "default", + "label": "Available Memory", + "metric": "available_memory", + "size": 6, + "unit": "GB", + "y_label": "available_memory" + } + ] + } \ No newline at end of file diff --git a/test/fixtures/monitor_services.json b/test/fixtures/monitor_services.json new file mode 100644 index 000000000..7a568866c --- /dev/null +++ b/test/fixtures/monitor_services.json @@ -0,0 +1,11 @@ +{ + "data": [ + { + "label": "Databases", + 
"service_type": "dbaas" + } + ], + "page": 1, + "pages": 1, + "results": 1 + } \ No newline at end of file diff --git a/test/fixtures/monitor_services_dbaas.json b/test/fixtures/monitor_services_dbaas.json new file mode 100644 index 000000000..7a568866c --- /dev/null +++ b/test/fixtures/monitor_services_dbaas.json @@ -0,0 +1,11 @@ +{ + "data": [ + { + "label": "Databases", + "service_type": "dbaas" + } + ], + "page": 1, + "pages": 1, + "results": 1 + } \ No newline at end of file diff --git a/test/fixtures/monitor_services_dbaas_dashboards.json b/test/fixtures/monitor_services_dbaas_dashboards.json new file mode 100644 index 000000000..5fbb7e9db --- /dev/null +++ b/test/fixtures/monitor_services_dbaas_dashboards.json @@ -0,0 +1,37 @@ +{ + "data": [ + { + "created": "2024-10-10T05:01:58", + "id": 1, + "label": "Resource Usage", + "service_type": "dbaas", + "type": "standard", + "updated": "2024-10-10T05:01:58", + "widgets": [ + { + "aggregate_function": "sum", + "chart_type": "area", + "color": "default", + "label": "CPU Usage", + "metric": "cpu_usage", + "size": 12, + "unit": "%", + "y_label": "cpu_usage" + }, + { + "aggregate_function": "sum", + "chart_type": "area", + "color": "default", + "label": "Memory Usage", + "metric": "memory_usage", + "size": 6, + "unit": "%", + "y_label": "memory_usage" + } + ] + } + ], + "page": 1, + "pages": 1, + "results": 1 + } \ No newline at end of file diff --git a/test/fixtures/monitor_services_dbaas_metric-definitions.json b/test/fixtures/monitor_services_dbaas_metric-definitions.json new file mode 100644 index 000000000..c493b23a3 --- /dev/null +++ b/test/fixtures/monitor_services_dbaas_metric-definitions.json @@ -0,0 +1,55 @@ +{ + "data": [ + { + "available_aggregate_functions": [ + "max", + "avg", + "min", + "sum" + ], + "dimensions": [ + { + "dimension_label": "node_type", + "label": "Node Type", + "values": [ + "primary", + "secondary" + ] + } + ], + "is_alertable": true, + "label": "CPU Usage", + "metric": "cpu_usage", + "metric_type": "gauge", + "scrape_interval": "60s", + "unit": "percent" + }, + { + "available_aggregate_functions": [ + "max", + "avg", + "min", + "sum" + ], + "dimensions": [ + { + "dimension_label": "node_type", + "label": "Node Type", + "values": [ + "primary", + "secondary" + ] + } + ], + "is_alertable": true, + "label": "Disk I/O Read", + "metric": "read_iops", + "metric_type": "gauge", + "scrape_interval": "60s", + "unit": "iops" + } + ], + "page": 1, + "pages": 1, + "results": 2 + } \ No newline at end of file diff --git a/test/fixtures/monitor_services_dbaas_token.json b/test/fixtures/monitor_services_dbaas_token.json new file mode 100644 index 000000000..b1aa0d786 --- /dev/null +++ b/test/fixtures/monitor_services_dbaas_token.json @@ -0,0 +1,3 @@ +{ + "token": "abcdefhjigkfghh" +} \ No newline at end of file diff --git a/test/fixtures/monitor_services_linode_token.json b/test/fixtures/monitor_services_linode_token.json new file mode 100644 index 000000000..b1aa0d786 --- /dev/null +++ b/test/fixtures/monitor_services_linode_token.json @@ -0,0 +1,3 @@ +{ + "token": "abcdefhjigkfghh" +} \ No newline at end of file diff --git a/test/integration/models/monitor/test_monitor.py b/test/integration/models/monitor/test_monitor.py new file mode 100644 index 000000000..5fb9626b3 --- /dev/null +++ b/test/integration/models/monitor/test_monitor.py @@ -0,0 +1,109 @@ +from test.integration.helpers import ( + get_test_label, + send_request_when_resource_available, + wait_for_condition, +) + +import pytest + +from linode_api4 import 
LinodeClient +from linode_api4.objects import ( + MonitorDashboard, + MonitorMetricsDefinition, + MonitorService, + MonitorServiceToken, +) + + +# List all dashboards +def test_get_all_dashboards(test_linode_client): + client = test_linode_client + dashboards = client.monitor.dashboards() + assert isinstance(dashboards[0], MonitorDashboard) + + dashboard_get = dashboards[0] + get_service_type = dashboard_get.service_type + + # Fetch Dashboard by ID + dashboard_by_id = client.load(MonitorDashboard, 1) + assert isinstance(dashboard_by_id, MonitorDashboard) + assert dashboard_by_id.id == 1 + + # #Fetch Dashboard by service_type + dashboards_by_svc = client.monitor.dashboards(service_type=get_service_type) + assert isinstance(dashboards_by_svc[0], MonitorDashboard) + assert dashboards_by_svc[0].service_type == get_service_type + + +# List supported services +def test_get_supported_services(test_linode_client): + client = test_linode_client + supported_services = client.monitor.services() + assert isinstance(supported_services[0], MonitorService) + + get_supported_service = supported_services[0].service_type + + # Get details for a particular service + service_details = client.monitor.services( + service_type=get_supported_service + ) + assert isinstance(service_details[0], MonitorService) + assert service_details[0].service_type == get_supported_service + + # Get Metric definition details for that particular service + metric_definitions = client.monitor.metric_definitions( + service_type=get_supported_service + ) + assert isinstance(metric_definitions[0], MonitorMetricsDefinition) + + +# Test Helpers +def get_db_engine_id(client: LinodeClient, engine: str): + engines = client.database.engines() + engine_id = "" + for e in engines: + if e.engine == engine: + engine_id = e.id + + return str(engine_id) + + +@pytest.fixture(scope="session") +def test_create_and_test_db(test_linode_client): + client = test_linode_client + label = get_test_label() + "-sqldb" + region = "us-ord" + engine_id = get_db_engine_id(client, "mysql") + dbtype = "g6-standard-1" + + db = client.database.mysql_create( + label=label, + region=region, + engine=engine_id, + ltype=dbtype, + cluster_size=None, + ) + + def get_db_status(): + return db.status == "active" + + # TAKES 15-30 MINUTES TO FULLY PROVISION DB + wait_for_condition(60, 2000, get_db_status) + + yield db + send_request_when_resource_available(300, db.delete) + + +def test_my_db_functionality(test_linode_client, test_create_and_test_db): + client = test_linode_client + assert test_create_and_test_db.status == "active" + + entity_id = test_create_and_test_db.id + + # create token for the particular service + token = client.monitor.create_token( + service_type="dbaas", entity_ids=[entity_id] + ) + assert isinstance(token, MonitorServiceToken) + assert len(token.token) > 0, "Token should not be empty" + assert hasattr(token, "token"), "Response object has no 'token' attribute" diff --git a/test/unit/objects/monitor_test.py b/test/unit/objects/monitor_test.py new file mode 100644 index 000000000..385eaf462 --- /dev/null +++ b/test/unit/objects/monitor_test.py @@ -0,0 +1,123 @@ +import datetime +from test.unit.base import ClientBaseCase + +from linode_api4.objects import MonitorDashboard + + +class MonitorTest(ClientBaseCase): + """ + Tests the methods of MonitorServiceSupported class + """ + + def test_supported_services(self): + """ + Test the services supported by monitor + """ + service = self.client.monitor.services() + self.assertEqual(len(service), 1) + 
self.assertEqual(service[0].label, "Databases") + self.assertEqual(service[0].service_type, "dbaas") + + def test_dashboard_by_ID(self): + """ + Test the dashboard by ID API + """ + dashboard = self.client.load(MonitorDashboard, 1) + self.assertEqual(dashboard.type, "standard") + self.assertEqual( + dashboard.created, datetime.datetime(2024, 10, 10, 5, 1, 58) + ) + self.assertEqual(dashboard.id, 1) + self.assertEqual(dashboard.label, "Resource Usage") + self.assertEqual(dashboard.service_type, "dbaas") + self.assertEqual( + dashboard.updated, datetime.datetime(2024, 10, 10, 5, 1, 58) + ) + self.assertEqual(dashboard.widgets[0].aggregate_function, "sum") + self.assertEqual(dashboard.widgets[0].chart_type, "area") + self.assertEqual(dashboard.widgets[0].color, "default") + self.assertEqual(dashboard.widgets[0].label, "CPU Usage") + self.assertEqual(dashboard.widgets[0].metric, "cpu_usage") + self.assertEqual(dashboard.widgets[0].size, 12) + self.assertEqual(dashboard.widgets[0].unit, "%") + self.assertEqual(dashboard.widgets[0].y_label, "cpu_usage") + + def test_dashboard_by_service_type(self): + dashboards = self.client.monitor.dashboards(service_type="dbaas") + self.assertEqual(dashboards[0].type, "standard") + self.assertEqual( + dashboards[0].created, datetime.datetime(2024, 10, 10, 5, 1, 58) + ) + self.assertEqual(dashboards[0].id, 1) + self.assertEqual(dashboards[0].label, "Resource Usage") + self.assertEqual(dashboards[0].service_type, "dbaas") + self.assertEqual( + dashboards[0].updated, datetime.datetime(2024, 10, 10, 5, 1, 58) + ) + self.assertEqual(dashboards[0].widgets[0].aggregate_function, "sum") + self.assertEqual(dashboards[0].widgets[0].chart_type, "area") + self.assertEqual(dashboards[0].widgets[0].color, "default") + self.assertEqual(dashboards[0].widgets[0].label, "CPU Usage") + self.assertEqual(dashboards[0].widgets[0].metric, "cpu_usage") + self.assertEqual(dashboards[0].widgets[0].size, 12) + self.assertEqual(dashboards[0].widgets[0].unit, "%") + self.assertEqual(dashboards[0].widgets[0].y_label, "cpu_usage") + + def test_get_all_dashboards(self): + dashboards = self.client.monitor.dashboards() + self.assertEqual(dashboards[0].type, "standard") + self.assertEqual( + dashboards[0].created, datetime.datetime(2024, 10, 10, 5, 1, 58) + ) + self.assertEqual(dashboards[0].id, 1) + self.assertEqual(dashboards[0].label, "Resource Usage") + self.assertEqual(dashboards[0].service_type, "dbaas") + self.assertEqual( + dashboards[0].updated, datetime.datetime(2024, 10, 10, 5, 1, 58) + ) + self.assertEqual(dashboards[0].widgets[0].aggregate_function, "sum") + self.assertEqual(dashboards[0].widgets[0].chart_type, "area") + self.assertEqual(dashboards[0].widgets[0].color, "default") + self.assertEqual(dashboards[0].widgets[0].label, "CPU Usage") + self.assertEqual(dashboards[0].widgets[0].metric, "cpu_usage") + self.assertEqual(dashboards[0].widgets[0].size, 12) + self.assertEqual(dashboards[0].widgets[0].unit, "%") + self.assertEqual(dashboards[0].widgets[0].y_label, "cpu_usage") + + def test_specific_service_details(self): + data = self.client.monitor.services(service_type="dbaas") + self.assertEqual(data[0].label, "Databases") + self.assertEqual(data[0].service_type, "dbaas") + + def test_metric_definitions(self): + + metrics = self.client.monitor.metric_definitions(service_type="dbaas") + self.assertEqual( + metrics[0].available_aggregate_functions, + ["max", "avg", "min", "sum"], + ) + self.assertEqual(metrics[0].is_alertable, True) + self.assertEqual(metrics[0].label, "CPU 
Usage") + self.assertEqual(metrics[0].metric, "cpu_usage") + self.assertEqual(metrics[0].metric_type, "gauge") + self.assertEqual(metrics[0].scrape_interval, "60s") + self.assertEqual(metrics[0].unit, "percent") + self.assertEqual(metrics[0].dimensions[0].dimension_label, "node_type") + self.assertEqual(metrics[0].dimensions[0].label, "Node Type") + self.assertEqual( + metrics[0].dimensions[0].values, ["primary", "secondary"] + ) + + def test_create_token(self): + + with self.mock_post("/monitor/services/dbaas/token") as m: + self.client.monitor.create_token( + service_type="dbaas", entity_ids=[189690, 188020] + ) + self.assertEqual(m.return_dct["token"], "abcdefhjigkfghh") + + with self.mock_post("/monitor/services/linode/token") as m: + self.client.monitor.create_token( + service_type="linode", entity_ids=["compute-instance-1"] + ) + self.assertEqual(m.return_dct["token"], "abcdefhjigkfghh") From 7b7f6470c8d61f46f9561b1cdaa8fee7a090d607 Mon Sep 17 00:00:00 2001 From: Erik Zilber Date: Fri, 6 Jun 2025 11:08:39 -0400 Subject: [PATCH 16/34] Reorganized DB unit tests (#561) --- test/unit/groups/database_test.py | 258 ++++++++++++++++++---------- test/unit/objects/database_test.py | 260 ----------------------------- 2 files changed, 166 insertions(+), 352 deletions(-) diff --git a/test/unit/groups/database_test.py b/test/unit/groups/database_test.py index d1939aec7..9647fed82 100644 --- a/test/unit/groups/database_test.py +++ b/test/unit/groups/database_test.py @@ -73,65 +73,6 @@ def test_database_instance(self): self.assertTrue(isinstance(db_translated, MySQLDatabase)) self.assertEqual(db_translated.ssl_connection, True) - -class MySQLDatabaseTest(ClientBaseCase): - """ - Tests methods of the MySQLDatabase class - """ - - def test_get_instances(self): - """ - Test that database types are properly handled - """ - dbs = self.client.database.mysql_instances() - - self.assertEqual(len(dbs), 1) - self.assertEqual(dbs[0].allow_list[1], "192.0.1.0/24") - self.assertEqual(dbs[0].cluster_size, 3) - self.assertEqual(dbs[0].encrypted, False) - self.assertEqual(dbs[0].engine, "mysql") - self.assertEqual( - dbs[0].hosts.primary, - "lin-123-456-mysql-mysql-primary.servers.linodedb.net", - ) - self.assertEqual( - dbs[0].hosts.secondary, - "lin-123-456-mysql-primary-private.servers.linodedb.net", - ) - self.assertEqual(dbs[0].id, 123) - self.assertEqual(dbs[0].region, "us-east") - self.assertEqual(dbs[0].updates.duration, 3) - self.assertEqual(dbs[0].version, "8.0.26") - - def test_create(self): - """ - Test that MySQL databases can be created - """ - - with self.mock_post("/databases/mysql/instances") as m: - # We don't care about errors here; we just want to - # validate the request. 
- try: - self.client.database.mysql_create( - "cool", - "us-southeast", - "mysql/8.0.26", - "g6-standard-1", - cluster_size=3, - ) - except Exception as e: - logger.warning( - "An error occurred while validating the request: %s", e - ) - - self.assertEqual(m.method, "post") - self.assertEqual(m.call_url, "/databases/mysql/instances") - self.assertEqual(m.call_data["label"], "cool") - self.assertEqual(m.call_data["region"], "us-southeast") - self.assertEqual(m.call_data["engine"], "mysql/8.0.26") - self.assertEqual(m.call_data["type"], "g6-standard-1") - self.assertEqual(m.call_data["cluster_size"], 3) - def test_mysql_config_options(self): """ Test that MySQL configuration options can be retrieved @@ -1320,15 +1261,86 @@ def test_postgresql_config_options(self): self.assertFalse(config["work_mem"]["requires_restart"]) self.assertEqual("integer", config["work_mem"]["type"]) + def test_get_mysql_instances(self): + """ + Test that mysql instances can be retrieved properly + """ + dbs = self.client.database.mysql_instances() -class PostgreSQLDatabaseTest(ClientBaseCase): - """ - Tests methods of the PostgreSQLDatabase class - """ + self.assertEqual(len(dbs), 1) + self.assertEqual(dbs[0].allow_list[1], "192.0.1.0/24") + self.assertEqual(dbs[0].cluster_size, 3) + self.assertEqual(dbs[0].encrypted, False) + self.assertEqual(dbs[0].engine, "mysql") + self.assertEqual( + dbs[0].hosts.primary, + "lin-123-456-mysql-mysql-primary.servers.linodedb.net", + ) + self.assertEqual( + dbs[0].hosts.secondary, + "lin-123-456-mysql-primary-private.servers.linodedb.net", + ) + self.assertEqual(dbs[0].id, 123) + self.assertEqual(dbs[0].region, "us-east") + self.assertEqual(dbs[0].updates.duration, 3) + self.assertEqual(dbs[0].version, "8.0.26") + self.assertEqual(dbs[0].engine_config.binlog_retention_period, 600) + self.assertEqual(dbs[0].engine_config.mysql.connect_timeout, 10) + self.assertEqual(dbs[0].engine_config.mysql.default_time_zone, "+03:00") + self.assertEqual(dbs[0].engine_config.mysql.group_concat_max_len, 1024) + self.assertEqual( + dbs[0].engine_config.mysql.information_schema_stats_expiry, 86400 + ) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_change_buffer_max_size, 30 + ) + self.assertEqual(dbs[0].engine_config.mysql.innodb_flush_neighbors, 0) + self.assertEqual(dbs[0].engine_config.mysql.innodb_ft_min_token_size, 3) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_ft_server_stopword_table, + "db_name/table_name", + ) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_lock_wait_timeout, 50 + ) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_log_buffer_size, 16777216 + ) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_online_alter_log_max_size, + 134217728, + ) + self.assertEqual(dbs[0].engine_config.mysql.innodb_read_io_threads, 10) + self.assertTrue(dbs[0].engine_config.mysql.innodb_rollback_on_timeout) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_thread_concurrency, 10 + ) + self.assertEqual(dbs[0].engine_config.mysql.innodb_write_io_threads, 10) + self.assertEqual(dbs[0].engine_config.mysql.interactive_timeout, 3600) + self.assertEqual( + dbs[0].engine_config.mysql.internal_tmp_mem_storage_engine, + "TempTable", + ) + self.assertEqual( + dbs[0].engine_config.mysql.max_allowed_packet, 67108864 + ) + self.assertEqual( + dbs[0].engine_config.mysql.max_heap_table_size, 16777216 + ) + self.assertEqual(dbs[0].engine_config.mysql.net_buffer_length, 16384) + self.assertEqual(dbs[0].engine_config.mysql.net_read_timeout, 30) + 
self.assertEqual(dbs[0].engine_config.mysql.net_write_timeout, 30) + self.assertEqual(dbs[0].engine_config.mysql.sort_buffer_size, 262144) + self.assertEqual( + dbs[0].engine_config.mysql.sql_mode, "ANSI,TRADITIONAL" + ) + self.assertTrue(dbs[0].engine_config.mysql.sql_require_primary_key) + self.assertEqual(dbs[0].engine_config.mysql.tmp_table_size, 16777216) + self.assertEqual(dbs[0].engine_config.mysql.wait_timeout, 28800) - def test_get_instances(self): + def test_get_postgresql_instances(self): """ - Test that database types are properly handled + Test that postgresql instances can be retrieved properly """ dbs = self.client.database.postgresql_instances() @@ -1350,31 +1362,93 @@ def test_get_instances(self): self.assertEqual(dbs[0].updates.duration, 3) self.assertEqual(dbs[0].version, "13.2") - def test_create(self): - """ - Test that PostgreSQL databases can be created - """ + print(dbs[0].engine_config.pg.__dict__) - with self.mock_post("/databases/postgresql/instances") as m: - # We don't care about errors here; we just want to - # validate the request. - try: - self.client.database.postgresql_create( - "cool", - "us-southeast", - "postgresql/13.2", - "g6-standard-1", - cluster_size=3, - ) - except Exception as e: - logger.warning( - "An error occurred while validating the request: %s", e - ) - - self.assertEqual(m.method, "post") - self.assertEqual(m.call_url, "/databases/postgresql/instances") - self.assertEqual(m.call_data["label"], "cool") - self.assertEqual(m.call_data["region"], "us-southeast") - self.assertEqual(m.call_data["engine"], "postgresql/13.2") - self.assertEqual(m.call_data["type"], "g6-standard-1") - self.assertEqual(m.call_data["cluster_size"], 3) + self.assertTrue(dbs[0].engine_config.pg_stat_monitor_enable) + self.assertEqual( + dbs[0].engine_config.pglookout.max_failover_replication_time_lag, + 1000, + ) + self.assertEqual(dbs[0].engine_config.shared_buffers_percentage, 41.5) + self.assertEqual(dbs[0].engine_config.work_mem, 4) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_analyze_scale_factor, 0.5 + ) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_analyze_threshold, 100 + ) + self.assertEqual(dbs[0].engine_config.pg.autovacuum_max_workers, 10) + self.assertEqual(dbs[0].engine_config.pg.autovacuum_naptime, 100) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_vacuum_cost_delay, 50 + ) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_vacuum_cost_limit, 100 + ) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_vacuum_scale_factor, 0.5 + ) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_vacuum_threshold, 100 + ) + self.assertEqual(dbs[0].engine_config.pg.bgwriter_delay, 200) + self.assertEqual(dbs[0].engine_config.pg.bgwriter_flush_after, 512) + self.assertEqual(dbs[0].engine_config.pg.bgwriter_lru_maxpages, 100) + self.assertEqual(dbs[0].engine_config.pg.bgwriter_lru_multiplier, 2.0) + self.assertEqual(dbs[0].engine_config.pg.deadlock_timeout, 1000) + self.assertEqual( + dbs[0].engine_config.pg.default_toast_compression, "lz4" + ) + self.assertEqual( + dbs[0].engine_config.pg.idle_in_transaction_session_timeout, 100 + ) + self.assertTrue(dbs[0].engine_config.pg.jit) + self.assertEqual(dbs[0].engine_config.pg.max_files_per_process, 100) + self.assertEqual(dbs[0].engine_config.pg.max_locks_per_transaction, 100) + self.assertEqual( + dbs[0].engine_config.pg.max_logical_replication_workers, 32 + ) + self.assertEqual(dbs[0].engine_config.pg.max_parallel_workers, 64) + self.assertEqual( + 
dbs[0].engine_config.pg.max_parallel_workers_per_gather, 64 + ) + self.assertEqual( + dbs[0].engine_config.pg.max_pred_locks_per_transaction, 1000 + ) + self.assertEqual(dbs[0].engine_config.pg.max_replication_slots, 32) + self.assertEqual(dbs[0].engine_config.pg.max_slot_wal_keep_size, 100) + self.assertEqual(dbs[0].engine_config.pg.max_stack_depth, 3507152) + self.assertEqual( + dbs[0].engine_config.pg.max_standby_archive_delay, 1000 + ) + self.assertEqual( + dbs[0].engine_config.pg.max_standby_streaming_delay, 1000 + ) + self.assertEqual(dbs[0].engine_config.pg.max_wal_senders, 32) + self.assertEqual(dbs[0].engine_config.pg.max_worker_processes, 64) + self.assertEqual( + dbs[0].engine_config.pg.password_encryption, "scram-sha-256" + ) + self.assertEqual(dbs[0].engine_config.pg.pg_partman_bgw_interval, 3600) + self.assertEqual( + dbs[0].engine_config.pg.pg_partman_bgw_role, "myrolename" + ) + self.assertFalse( + dbs[0].engine_config.pg.pg_stat_monitor_pgsm_enable_query_plan + ) + self.assertEqual( + dbs[0].engine_config.pg.pg_stat_monitor_pgsm_max_buckets, 10 + ) + self.assertEqual( + dbs[0].engine_config.pg.pg_stat_statements_track, "top" + ) + self.assertEqual(dbs[0].engine_config.pg.temp_file_limit, 5000000) + self.assertEqual(dbs[0].engine_config.pg.timezone, "Europe/Helsinki") + self.assertEqual( + dbs[0].engine_config.pg.track_activity_query_size, 1024 + ) + self.assertEqual(dbs[0].engine_config.pg.track_commit_timestamp, "off") + self.assertEqual(dbs[0].engine_config.pg.track_functions, "all") + self.assertEqual(dbs[0].engine_config.pg.track_io_timing, "off") + self.assertEqual(dbs[0].engine_config.pg.wal_sender_timeout, 60000) + self.assertEqual(dbs[0].engine_config.pg.wal_writer_delay, 50) diff --git a/test/unit/objects/database_test.py b/test/unit/objects/database_test.py index 8605e43c5..c5abe3a58 100644 --- a/test/unit/objects/database_test.py +++ b/test/unit/objects/database_test.py @@ -13,156 +13,11 @@ logger = logging.getLogger(__name__) -class DatabaseTest(ClientBaseCase): - """ - Tests methods of the DatabaseGroup class - """ - - def test_get_types(self): - """ - Test that database types are properly handled - """ - types = self.client.database.types() - - self.assertEqual(len(types), 1) - self.assertEqual(types[0].type_class, "nanode") - self.assertEqual(types[0].id, "g6-nanode-1") - self.assertEqual(types[0].engines.mysql[0].price.monthly, 20) - - def test_get_engines(self): - """ - Test that database engines are properly handled - """ - engines = self.client.database.engines() - - self.assertEqual(len(engines), 2) - - self.assertEqual(engines[0].engine, "mysql") - self.assertEqual(engines[0].id, "mysql/8.0.26") - self.assertEqual(engines[0].version, "8.0.26") - - self.assertEqual(engines[1].engine, "postgresql") - self.assertEqual(engines[1].id, "postgresql/10.14") - self.assertEqual(engines[1].version, "10.14") - - def test_get_databases(self): - """ - Test that databases are properly handled - """ - dbs = self.client.database.instances() - - self.assertEqual(len(dbs), 1) - self.assertEqual(dbs[0].allow_list[1], "192.0.1.0/24") - self.assertEqual(dbs[0].cluster_size, 3) - self.assertEqual(dbs[0].encrypted, False) - self.assertEqual(dbs[0].engine, "mysql") - self.assertEqual( - dbs[0].hosts.primary, - "lin-123-456-mysql-mysql-primary.servers.linodedb.net", - ) - self.assertEqual( - dbs[0].hosts.secondary, - "lin-123-456-mysql-primary-private.servers.linodedb.net", - ) - self.assertEqual(dbs[0].id, 123) - self.assertEqual(dbs[0].region, "us-east") - 
self.assertEqual(dbs[0].updates.duration, 3) - self.assertEqual(dbs[0].version, "8.0.26") - - def test_database_instance(self): - """ - Ensures that the .instance attribute properly translates database types - """ - - dbs = self.client.database.instances() - db_translated = dbs[0].instance - - self.assertTrue(isinstance(db_translated, MySQLDatabase)) - self.assertEqual(db_translated.ssl_connection, True) - - class MySQLDatabaseTest(ClientBaseCase): """ Tests methods of the MySQLDatabase class """ - def test_get_instances(self): - """ - Test that database types are properly handled - """ - dbs = self.client.database.mysql_instances() - - self.assertEqual(len(dbs), 1) - self.assertEqual(dbs[0].allow_list[1], "192.0.1.0/24") - self.assertEqual(dbs[0].cluster_size, 3) - self.assertEqual(dbs[0].encrypted, False) - self.assertEqual(dbs[0].engine, "mysql") - self.assertEqual( - dbs[0].hosts.primary, - "lin-123-456-mysql-mysql-primary.servers.linodedb.net", - ) - self.assertEqual( - dbs[0].hosts.secondary, - "lin-123-456-mysql-primary-private.servers.linodedb.net", - ) - self.assertEqual(dbs[0].id, 123) - self.assertEqual(dbs[0].region, "us-east") - self.assertEqual(dbs[0].updates.duration, 3) - self.assertEqual(dbs[0].version, "8.0.26") - self.assertEqual(dbs[0].engine_config.binlog_retention_period, 600) - self.assertEqual(dbs[0].engine_config.mysql.connect_timeout, 10) - self.assertEqual(dbs[0].engine_config.mysql.default_time_zone, "+03:00") - self.assertEqual(dbs[0].engine_config.mysql.group_concat_max_len, 1024) - self.assertEqual( - dbs[0].engine_config.mysql.information_schema_stats_expiry, 86400 - ) - self.assertEqual( - dbs[0].engine_config.mysql.innodb_change_buffer_max_size, 30 - ) - self.assertEqual(dbs[0].engine_config.mysql.innodb_flush_neighbors, 0) - self.assertEqual(dbs[0].engine_config.mysql.innodb_ft_min_token_size, 3) - self.assertEqual( - dbs[0].engine_config.mysql.innodb_ft_server_stopword_table, - "db_name/table_name", - ) - self.assertEqual( - dbs[0].engine_config.mysql.innodb_lock_wait_timeout, 50 - ) - self.assertEqual( - dbs[0].engine_config.mysql.innodb_log_buffer_size, 16777216 - ) - self.assertEqual( - dbs[0].engine_config.mysql.innodb_online_alter_log_max_size, - 134217728, - ) - self.assertEqual(dbs[0].engine_config.mysql.innodb_read_io_threads, 10) - self.assertTrue(dbs[0].engine_config.mysql.innodb_rollback_on_timeout) - self.assertEqual( - dbs[0].engine_config.mysql.innodb_thread_concurrency, 10 - ) - self.assertEqual(dbs[0].engine_config.mysql.innodb_write_io_threads, 10) - self.assertEqual(dbs[0].engine_config.mysql.interactive_timeout, 3600) - self.assertEqual( - dbs[0].engine_config.mysql.internal_tmp_mem_storage_engine, - "TempTable", - ) - self.assertEqual( - dbs[0].engine_config.mysql.max_allowed_packet, 67108864 - ) - self.assertEqual( - dbs[0].engine_config.mysql.max_heap_table_size, 16777216 - ) - self.assertEqual(dbs[0].engine_config.mysql.net_buffer_length, 16384) - self.assertEqual(dbs[0].engine_config.mysql.net_read_timeout, 30) - self.assertEqual(dbs[0].engine_config.mysql.net_write_timeout, 30) - self.assertEqual(dbs[0].engine_config.mysql.sort_buffer_size, 262144) - self.assertEqual( - dbs[0].engine_config.mysql.sql_mode, "ANSI,TRADITIONAL" - ) - self.assertTrue(dbs[0].engine_config.mysql.sql_require_primary_key) - self.assertEqual(dbs[0].engine_config.mysql.tmp_table_size, 16777216) - self.assertEqual(dbs[0].engine_config.mysql.wait_timeout, 28800) - def test_create(self): """ Test that MySQL databases can be created @@ -378,121 +233,6 @@ class 
PostgreSQLDatabaseTest(ClientBaseCase): Tests methods of the PostgreSQLDatabase class """ - def test_get_instances(self): - """ - Test that database types are properly handled - """ - dbs = self.client.database.postgresql_instances() - - self.assertEqual(len(dbs), 1) - self.assertEqual(dbs[0].allow_list[1], "192.0.1.0/24") - self.assertEqual(dbs[0].cluster_size, 3) - self.assertEqual(dbs[0].encrypted, False) - self.assertEqual(dbs[0].engine, "postgresql") - self.assertEqual( - dbs[0].hosts.primary, - "lin-0000-000-pgsql-primary.servers.linodedb.net", - ) - self.assertEqual( - dbs[0].hosts.secondary, - "lin-0000-000-pgsql-primary-private.servers.linodedb.net", - ) - self.assertEqual(dbs[0].id, 123) - self.assertEqual(dbs[0].region, "us-east") - self.assertEqual(dbs[0].updates.duration, 3) - self.assertEqual(dbs[0].version, "13.2") - - print(dbs[0].engine_config.pg.__dict__) - - self.assertTrue(dbs[0].engine_config.pg_stat_monitor_enable) - self.assertEqual( - dbs[0].engine_config.pglookout.max_failover_replication_time_lag, - 1000, - ) - self.assertEqual(dbs[0].engine_config.shared_buffers_percentage, 41.5) - self.assertEqual(dbs[0].engine_config.work_mem, 4) - self.assertEqual( - dbs[0].engine_config.pg.autovacuum_analyze_scale_factor, 0.5 - ) - self.assertEqual( - dbs[0].engine_config.pg.autovacuum_analyze_threshold, 100 - ) - self.assertEqual(dbs[0].engine_config.pg.autovacuum_max_workers, 10) - self.assertEqual(dbs[0].engine_config.pg.autovacuum_naptime, 100) - self.assertEqual( - dbs[0].engine_config.pg.autovacuum_vacuum_cost_delay, 50 - ) - self.assertEqual( - dbs[0].engine_config.pg.autovacuum_vacuum_cost_limit, 100 - ) - self.assertEqual( - dbs[0].engine_config.pg.autovacuum_vacuum_scale_factor, 0.5 - ) - self.assertEqual( - dbs[0].engine_config.pg.autovacuum_vacuum_threshold, 100 - ) - self.assertEqual(dbs[0].engine_config.pg.bgwriter_delay, 200) - self.assertEqual(dbs[0].engine_config.pg.bgwriter_flush_after, 512) - self.assertEqual(dbs[0].engine_config.pg.bgwriter_lru_maxpages, 100) - self.assertEqual(dbs[0].engine_config.pg.bgwriter_lru_multiplier, 2.0) - self.assertEqual(dbs[0].engine_config.pg.deadlock_timeout, 1000) - self.assertEqual( - dbs[0].engine_config.pg.default_toast_compression, "lz4" - ) - self.assertEqual( - dbs[0].engine_config.pg.idle_in_transaction_session_timeout, 100 - ) - self.assertTrue(dbs[0].engine_config.pg.jit) - self.assertEqual(dbs[0].engine_config.pg.max_files_per_process, 100) - self.assertEqual(dbs[0].engine_config.pg.max_locks_per_transaction, 100) - self.assertEqual( - dbs[0].engine_config.pg.max_logical_replication_workers, 32 - ) - self.assertEqual(dbs[0].engine_config.pg.max_parallel_workers, 64) - self.assertEqual( - dbs[0].engine_config.pg.max_parallel_workers_per_gather, 64 - ) - self.assertEqual( - dbs[0].engine_config.pg.max_pred_locks_per_transaction, 1000 - ) - self.assertEqual(dbs[0].engine_config.pg.max_replication_slots, 32) - self.assertEqual(dbs[0].engine_config.pg.max_slot_wal_keep_size, 100) - self.assertEqual(dbs[0].engine_config.pg.max_stack_depth, 3507152) - self.assertEqual( - dbs[0].engine_config.pg.max_standby_archive_delay, 1000 - ) - self.assertEqual( - dbs[0].engine_config.pg.max_standby_streaming_delay, 1000 - ) - self.assertEqual(dbs[0].engine_config.pg.max_wal_senders, 32) - self.assertEqual(dbs[0].engine_config.pg.max_worker_processes, 64) - self.assertEqual( - dbs[0].engine_config.pg.password_encryption, "scram-sha-256" - ) - self.assertEqual(dbs[0].engine_config.pg.pg_partman_bgw_interval, 3600) - self.assertEqual( 
- dbs[0].engine_config.pg.pg_partman_bgw_role, "myrolename" - ) - self.assertFalse( - dbs[0].engine_config.pg.pg_stat_monitor_pgsm_enable_query_plan - ) - self.assertEqual( - dbs[0].engine_config.pg.pg_stat_monitor_pgsm_max_buckets, 10 - ) - self.assertEqual( - dbs[0].engine_config.pg.pg_stat_statements_track, "top" - ) - self.assertEqual(dbs[0].engine_config.pg.temp_file_limit, 5000000) - self.assertEqual(dbs[0].engine_config.pg.timezone, "Europe/Helsinki") - self.assertEqual( - dbs[0].engine_config.pg.track_activity_query_size, 1024 - ) - self.assertEqual(dbs[0].engine_config.pg.track_commit_timestamp, "off") - self.assertEqual(dbs[0].engine_config.pg.track_functions, "all") - self.assertEqual(dbs[0].engine_config.pg.track_io_timing, "off") - self.assertEqual(dbs[0].engine_config.pg.wal_sender_timeout, 60000) - self.assertEqual(dbs[0].engine_config.pg.wal_writer_delay, 50) - def test_create(self): """ Test that PostgreSQL databases can be created From 389905e117b7177c377464bac3d68a18a0e99c42 Mon Sep 17 00:00:00 2001 From: Zhiwei Liang <121905282+zliang-akamai@users.noreply.github.com> Date: Mon, 30 Jun 2025 13:51:27 -0400 Subject: [PATCH 17/34] Fix Tests for v5.33 release (#567) * Add 503 status code to retry in `send_request_when_resource_available` * Remove error message assertions in VPC tests --- test/integration/helpers.py | 2 +- test/integration/models/vpc/test_vpc.py | 4 ---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/test/integration/helpers.py b/test/integration/helpers.py index 0ee9810a8..9777d5950 100644 --- a/test/integration/helpers.py +++ b/test/integration/helpers.py @@ -43,7 +43,7 @@ def send_request_when_resource_available( timeout: int, func: Callable, *args, **kwargs ) -> object: start_time = time.time() - retry_statuses = {400, 500} + retry_statuses = {400, 500, 503} while True: try: diff --git a/test/integration/models/vpc/test_vpc.py b/test/integration/models/vpc/test_vpc.py index 5dd14b502..0e9d27aff 100644 --- a/test/integration/models/vpc/test_vpc.py +++ b/test/integration/models/vpc/test_vpc.py @@ -56,7 +56,6 @@ def test_fails_create_vpc_invalid_data(test_linode_client): description="test description", ) assert excinfo.value.status == 400 - assert "Label must include only ASCII" in str(excinfo.value.json) def test_get_all_vpcs(test_linode_client, create_multiple_vpcs): @@ -78,7 +77,6 @@ def test_fails_update_vpc_invalid_data(create_vpc): vpc.save() assert excinfo.value.status == 400 - assert "Label must include only ASCII" in str(excinfo.value.json) def test_fails_create_subnet_invalid_data(create_vpc): @@ -88,7 +86,6 @@ def test_fails_create_subnet_invalid_data(create_vpc): create_vpc.subnet_create("test-subnet", ipv4=invalid_ipv4) assert excinfo.value.status == 400 - assert "ipv4 must be an IPv4 network" in str(excinfo.value.json) def test_fails_update_subnet_invalid_data(create_vpc_with_subnet): @@ -100,4 +97,3 @@ def test_fails_update_subnet_invalid_data(create_vpc_with_subnet): subnet.save() assert excinfo.value.status == 400 - assert "Label must include only ASCII" in str(excinfo.value.json) From b6ccfd3362a506ccc2cc5b62e9781b60213e02be Mon Sep 17 00:00:00 2001 From: pmajali Date: Wed, 9 Jul 2025 23:35:08 +0530 Subject: [PATCH 18/34] updating MonitorService class (#568) * updating services endpoint * resolving lint errors --- linode_api4/groups/monitor.py | 18 ++++++-------- linode_api4/objects/monitor.py | 11 +++++---- test/fixtures/monitor_services_dbaas.json | 24 +++++++++++-------- .../models/monitor/test_monitor.py | 8 +++---- 
test/unit/objects/monitor_test.py | 8 +++---- 5 files changed, 35 insertions(+), 34 deletions(-) diff --git a/linode_api4/groups/monitor.py b/linode_api4/groups/monitor.py index 908b4e819..14b5617c4 100644 --- a/linode_api4/groups/monitor.py +++ b/linode_api4/groups/monitor.py @@ -62,32 +62,28 @@ def dashboards( ) def services( - self, *filters, service_type: Optional[str] = None - ) -> list[MonitorService]: + self, + *filters, + ) -> PaginatedList: """ Lists services supported by ACLP. supported_services = client.monitor.services() - service_details = client.monitor.services(service_type="dbaas") + service_details = client.monitor.load(MonitorService, "dbaas") .. note:: This endpoint is in beta. This will only function if base_url is set to `https://api.linode.com/v4beta`. API Documentation: https://techdocs.akamai.com/linode-api/reference/get-monitor-services API Documentation: https://techdocs.akamai.com/linode-api/reference/get-monitor-services-for-service-type - :param service_type: The service type to get details for. - :type service_type: Optional[str] :param filters: Any number of filters to apply to this query. See :doc:`Filtering Collections` for more details on filtering. - :returns: Lists monitor services by a given service_type + :returns: Lists monitor services :rtype: PaginatedList of the Services """ - endpoint = ( - f"/monitor/services/{service_type}" - if service_type - else "/monitor/services" - ) + endpoint = "/monitor/services" + return self.client._get_and_filter( MonitorService, *filters, diff --git a/linode_api4/objects/monitor.py b/linode_api4/objects/monitor.py index f518e641d..ae3936ee7 100644 --- a/linode_api4/objects/monitor.py +++ b/linode_api4/objects/monitor.py @@ -157,16 +157,19 @@ class MonitorDashboard(Base): } -@dataclass -class MonitorService(JSONObject): +class MonitorService(Base): """ Represents a single service type. 
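
With MonitorService reworked from a JSONObject dataclass into a Base subclass
keyed on service_type, single-service lookups move from a filtered list call
to client.load, as the updated tests in this patch show; a short sketch,
assuming an authenticated client:

    from linode_api4 import LinodeClient
    from linode_api4.objects import MonitorService

    client = LinodeClient("my-access-token", base_url="https://api.linode.com/v4beta")

    # Old pattern (removed): client.monitor.services(service_type="dbaas")
    # New pattern:
    service = client.load(MonitorService, "dbaas")
    print(service.label, service.service_type)
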
API Documentation: https://techdocs.akamai.com/linode-api/reference/get-monitor-services """ - service_type: ServiceType = "" - label: str = "" + api_endpoint = "/monitor/services/{service_type}" + id_attribute = "service_type" + properties = { + "service_type": Property(ServiceType), + "label": Property(), + } @dataclass diff --git a/test/fixtures/monitor_services_dbaas.json b/test/fixtures/monitor_services_dbaas.json index 7a568866c..211833847 100644 --- a/test/fixtures/monitor_services_dbaas.json +++ b/test/fixtures/monitor_services_dbaas.json @@ -1,11 +1,15 @@ { - "data": [ - { - "label": "Databases", - "service_type": "dbaas" - } - ], - "page": 1, - "pages": 1, - "results": 1 - } \ No newline at end of file + "service_type": "dbaas", + "label": "Databases", + "alert": { + "polling_interval_seconds": [ + 300 + ], + "evaluation_period_seconds": [ + 300 + ], + "scope": [ + "entity" + ] + } +} \ No newline at end of file diff --git a/test/integration/models/monitor/test_monitor.py b/test/integration/models/monitor/test_monitor.py index 5fb9626b3..7c9249f42 100644 --- a/test/integration/models/monitor/test_monitor.py +++ b/test/integration/models/monitor/test_monitor.py @@ -44,11 +44,9 @@ def test_get_supported_services(test_linode_client): get_supported_service = supported_services[0].service_type # Get details for a particular service - service_details = client.monitor.services( - service_type=get_supported_service - ) - assert isinstance(service_details[0], MonitorService) - assert service_details[0].service_type == get_supported_service + service_details = client.load(MonitorService, get_supported_service) + assert isinstance(service_details, MonitorService) + assert service_details.service_type == get_supported_service # Get Metric definition details for that particular service metric_definitions = client.monitor.metric_definitions( diff --git a/test/unit/objects/monitor_test.py b/test/unit/objects/monitor_test.py index 385eaf462..a010514c2 100644 --- a/test/unit/objects/monitor_test.py +++ b/test/unit/objects/monitor_test.py @@ -1,7 +1,7 @@ import datetime from test.unit.base import ClientBaseCase -from linode_api4.objects import MonitorDashboard +from linode_api4.objects import MonitorDashboard, MonitorService class MonitorTest(ClientBaseCase): @@ -85,9 +85,9 @@ def test_get_all_dashboards(self): self.assertEqual(dashboards[0].widgets[0].y_label, "cpu_usage") def test_specific_service_details(self): - data = self.client.monitor.services(service_type="dbaas") - self.assertEqual(data[0].label, "Databases") - self.assertEqual(data[0].service_type, "dbaas") + data = self.client.load(MonitorService, "dbaas") + self.assertEqual(data.label, "Databases") + self.assertEqual(data.service_type, "dbaas") def test_metric_definitions(self): From 818feb8d6ca440cf26d7c8a87d1e2bddcbaccf54 Mon Sep 17 00:00:00 2001 From: Youjung Kim <126618609+ykim-akamai@users.noreply.github.com> Date: Tue, 15 Jul 2025 07:03:55 -0700 Subject: [PATCH 19/34] Add basic model filter integration test coverage (#563) --- Makefile | 2 +- test/integration/filters/fixtures.py | 39 +++++++++ .../integration/filters/model_filters_test.py | 84 +++++++++++++++++++ 3 files changed, 124 insertions(+), 1 deletion(-) create mode 100644 test/integration/filters/fixtures.py create mode 100644 test/integration/filters/model_filters_test.py diff --git a/Makefile b/Makefile index 4bfb1c348..ce7ef77d0 100644 --- a/Makefile +++ b/Makefile @@ -58,7 +58,7 @@ lint: build # TEST_CASE: Optional, specify a test case (e.g. 
'test_image_replication') # TEST_ARGS: Optional, additional arguments for pytest (e.g. '-v' for verbose mode) -TEST_COMMAND = $(if $(TEST_SUITE),$(if $(filter $(TEST_SUITE),linode_client login_client),$(TEST_SUITE),models/$(TEST_SUITE))) +TEST_COMMAND = $(if $(TEST_SUITE),$(if $(filter $(TEST_SUITE),linode_client login_client filters),$(TEST_SUITE),models/$(TEST_SUITE))) .PHONY: test-int test-int: diff --git a/test/integration/filters/fixtures.py b/test/integration/filters/fixtures.py new file mode 100644 index 000000000..344303eee --- /dev/null +++ b/test/integration/filters/fixtures.py @@ -0,0 +1,39 @@ +from test.integration.conftest import get_region +from test.integration.helpers import get_test_label + +import pytest + + +@pytest.fixture(scope="package") +def domain_instance(test_linode_client): + client = test_linode_client + + domain_addr = get_test_label(5) + "-example.com" + soa_email = "dx-test-email@linode.com" + + domain = client.domain_create(domain=domain_addr, soa_email=soa_email) + + yield domain + + domain.delete() + + +@pytest.fixture(scope="package") +def lke_cluster_instance(test_linode_client): + node_type = test_linode_client.linode.types()[1] # g6-standard-1 + version = test_linode_client.lke.versions()[0] + + region = get_region( + test_linode_client, {"Kubernetes", "LA Disk Encryption"} + ) + + node_pools = test_linode_client.lke.node_pool(node_type, 3) + label = get_test_label() + "_cluster" + + cluster = test_linode_client.lke.cluster_create( + region, label, node_pools, version + ) + + yield cluster + + cluster.delete() diff --git a/test/integration/filters/model_filters_test.py b/test/integration/filters/model_filters_test.py new file mode 100644 index 000000000..22bb8299e --- /dev/null +++ b/test/integration/filters/model_filters_test.py @@ -0,0 +1,84 @@ +from test.integration.filters.fixtures import ( # noqa: F401 + domain_instance, + lke_cluster_instance, +) + +from linode_api4.objects import ( + DatabaseEngine, + DatabaseType, + Domain, + Firewall, + Image, + LKECluster, + Type, +) + + +def test_database_type_model_filter(test_linode_client): + client = test_linode_client + + db_disk = client.database.types()[0].disk + + filtered_db_type = client.database.types(DatabaseType.disk == db_disk) + + assert db_disk == filtered_db_type[0].disk + + +def test_database_engine_model_filter(test_linode_client): + client = test_linode_client + + engine = "mysql" + + filtered_db_engine = client.database.engines( + DatabaseEngine.engine == engine + ) + + assert len(client.database.engines()) > len(filtered_db_engine) + + +def test_domain_model_filter(test_linode_client, domain_instance): + client = test_linode_client + + filtered_domain = client.domains(Domain.domain == domain_instance.domain) + + assert domain_instance.id == filtered_domain[0].id + + +def test_image_model_filter(test_linode_client): + client = test_linode_client + + filtered_images = client.images(Image.label.contains("Debian")) + + assert len(client.images()) > len(filtered_images) + + +def test_linode_type_model_filter(test_linode_client): + client = test_linode_client + + filtered_types = client.linode.types(Type.label.contains("Linode")) + + assert len(filtered_types) > 0 + assert "Linode" in filtered_types[0].label + + +def test_lke_cluster_model_filter(test_linode_client, lke_cluster_instance): + client = test_linode_client + + filtered_cluster = client.lke.clusters( + LKECluster.label.contains(lke_cluster_instance.label) + ) + + assert filtered_cluster[0].id == lke_cluster_instance.id + + +def 
test_networking_firewall_model_filter( + test_linode_client, e2e_test_firewall +): + client = test_linode_client + + filtered_firewall = client.networking.firewalls( + Firewall.label.contains(e2e_test_firewall.label) + ) + + assert len(filtered_firewall) > 0 + assert e2e_test_firewall.label in filtered_firewall[0].label From e35ffe81a546fb18b29068843648522882f41ac3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 16 Jul 2025 10:54:10 -0400 Subject: [PATCH 20/34] build(deps): bump slackapi/slack-github-action from 2.1.0 to 2.1.1 (#570) Bumps [slackapi/slack-github-action](https://github.com/slackapi/slack-github-action) from 2.1.0 to 2.1.1. - [Release notes](https://github.com/slackapi/slack-github-action/releases) - [Commits](https://github.com/slackapi/slack-github-action/compare/v2.1.0...v2.1.1) --- updated-dependencies: - dependency-name: slackapi/slack-github-action dependency-version: 2.1.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/e2e-test.yml | 4 ++-- .github/workflows/nightly-smoke-tests.yml | 2 +- .github/workflows/release-notify-slack.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/e2e-test.yml b/.github/workflows/e2e-test.yml index d08999645..1c4ec8540 100644 --- a/.github/workflows/e2e-test.yml +++ b/.github/workflows/e2e-test.yml @@ -232,7 +232,7 @@ jobs: steps: - name: Notify Slack id: main_message - uses: slackapi/slack-github-action@v2.1.0 + uses: slackapi/slack-github-action@v2.1.1 with: method: chat.postMessage token: ${{ secrets.SLACK_BOT_TOKEN }} @@ -264,7 +264,7 @@ jobs: - name: Test summary thread if: success() - uses: slackapi/slack-github-action@v2.1.0 + uses: slackapi/slack-github-action@v2.1.1 with: method: chat.postMessage token: ${{ secrets.SLACK_BOT_TOKEN }} diff --git a/.github/workflows/nightly-smoke-tests.yml b/.github/workflows/nightly-smoke-tests.yml index 3f6083a98..dc41e1600 100644 --- a/.github/workflows/nightly-smoke-tests.yml +++ b/.github/workflows/nightly-smoke-tests.yml @@ -45,7 +45,7 @@ jobs: - name: Notify Slack if: always() && github.repository == 'linode/linode_api4-python' - uses: slackapi/slack-github-action@v2.1.0 + uses: slackapi/slack-github-action@v2.1.1 with: method: chat.postMessage token: ${{ secrets.SLACK_BOT_TOKEN }} diff --git a/.github/workflows/release-notify-slack.yml b/.github/workflows/release-notify-slack.yml index f2739e988..4b01f094b 100644 --- a/.github/workflows/release-notify-slack.yml +++ b/.github/workflows/release-notify-slack.yml @@ -11,7 +11,7 @@ jobs: steps: - name: Notify Slack - Main Message id: main_message - uses: slackapi/slack-github-action@v2.1.0 + uses: slackapi/slack-github-action@v2.1.1 with: method: chat.postMessage token: ${{ secrets.SLACK_BOT_TOKEN }} From de7cde17026e8b333eafb15e00215825d8f7e74e Mon Sep 17 00:00:00 2001 From: Ye Chen <127243817+yec-akamai@users.noreply.github.com> Date: Tue, 22 Jul 2025 11:20:54 -0400 Subject: [PATCH 21/34] Fix timeout integration test cases (#575) --- test/integration/models/account/test_account.py | 4 ++-- test/integration/models/linode/test_linode.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/integration/models/account/test_account.py b/test/integration/models/account/test_account.py index decad434f..72cd97cda 100644 --- 
a/test/integration/models/account/test_account.py +++ b/test/integration/models/account/test_account.py @@ -1,7 +1,7 @@ import time from datetime import datetime from test.integration.conftest import get_region -from test.integration.helpers import get_test_label +from test.integration.helpers import get_test_label, retry_sending_request import pytest @@ -37,7 +37,7 @@ def test_get_account(test_linode_client): def test_get_login(test_linode_client): client = test_linode_client - login = client.load(Login(client, "", {}), "") + login = retry_sending_request(3, client.load, Login(client, "", {}), "") updated_time = int(time.mktime(getattr(login, "_last_updated").timetuple())) diff --git a/test/integration/models/linode/test_linode.py b/test/integration/models/linode/test_linode.py index ade4ca5ed..97965f2b9 100644 --- a/test/integration/models/linode/test_linode.py +++ b/test/integration/models/linode/test_linode.py @@ -252,7 +252,7 @@ def test_linode_rebuild(test_linode_client): disk_encryption=InstanceDiskEncryptionType.disabled, ) - wait_for_condition(10, 100, get_status, linode, "rebuilding") + wait_for_condition(10, 300, get_status, linode, "rebuilding") assert linode.status == "rebuilding" assert linode.image.id == "linode/debian12" From 9987e83e32dd97f82eaaa725a99b9edc37e313b4 Mon Sep 17 00:00:00 2001 From: Zhiwei Liang <121905282+zliang-akamai@users.noreply.github.com> Date: Thu, 24 Jul 2025 15:54:08 -0400 Subject: [PATCH 22/34] Raise the exception from the API error after retry in tests (#576) --- test/integration/helpers.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/integration/helpers.py b/test/integration/helpers.py index 9777d5950..969ca70a9 100644 --- a/test/integration/helpers.py +++ b/test/integration/helpers.py @@ -31,11 +31,11 @@ def retry_sending_request( for attempt in range(1, retries + 1): try: return condition(*args, **kwargs) - except ApiError: + except ApiError as e: if attempt == retries: - raise ApiError( + raise Exception( "Api Error: Failed after all retry attempts" - ) from None + ) from e time.sleep(backoff) From 74e272ad5ce6a8b4c64bc2d48955eed537a5db47 Mon Sep 17 00:00:00 2001 From: Ye Chen <127243817+yec-akamai@users.noreply.github.com> Date: Mon, 11 Aug 2025 14:00:37 -0400 Subject: [PATCH 23/34] Support Monitor Client and Fetch Entity Metrics (#569) * init * lint * define all * clean up * lint * fix import * fix import * add int test --- linode_api4/__init__.py | 2 +- linode_api4/groups/__init__.py | 1 + linode_api4/groups/group.py | 4 +- linode_api4/groups/monitor.py | 4 +- linode_api4/groups/monitor_api.py | 59 +++ linode_api4/linode_client.py | 383 ++++++++++++------ linode_api4/objects/__init__.py | 1 + linode_api4/objects/monitor.py | 4 +- linode_api4/objects/monitor_api.py | 44 ++ .../monitor_services_dbaas_metrics.json | 47 +++ test/integration/conftest.py | 75 +++- .../models/monitor_api/test_monitor_api.py | 12 + test/unit/base.py | 28 +- test/unit/groups/monitor_api_test.py | 52 +++ 14 files changed, 575 insertions(+), 141 deletions(-) create mode 100644 linode_api4/groups/monitor_api.py create mode 100644 linode_api4/objects/monitor_api.py create mode 100644 test/fixtures/monitor_services_dbaas_metrics.json create mode 100644 test/integration/models/monitor_api/test_monitor_api.py create mode 100644 test/unit/groups/monitor_api_test.py diff --git a/linode_api4/__init__.py b/linode_api4/__init__.py index b347b607d..69fa1111c 100644 --- a/linode_api4/__init__.py +++ b/linode_api4/__init__.py @@ -1,7 +1,7 @@ # isort: 
skip_file from linode_api4.objects import * from linode_api4.errors import ApiError, UnexpectedResponseError -from linode_api4.linode_client import LinodeClient +from linode_api4.linode_client import LinodeClient, MonitorClient from linode_api4.login_client import LinodeLoginClient, OAuthScopes from linode_api4.paginated_list import PaginatedList from linode_api4.polling import EventPoller diff --git a/linode_api4/groups/__init__.py b/linode_api4/groups/__init__.py index 3842042ad..4096cd21c 100644 --- a/linode_api4/groups/__init__.py +++ b/linode_api4/groups/__init__.py @@ -11,6 +11,7 @@ from .lke_tier import * from .longview import * from .monitor import * +from .monitor_api import * from .networking import * from .nodebalancer import * from .object_storage import * diff --git a/linode_api4/groups/group.py b/linode_api4/groups/group.py index c591b7fda..b7c0e1eeb 100644 --- a/linode_api4/groups/group.py +++ b/linode_api4/groups/group.py @@ -3,9 +3,9 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from linode_api4 import LinodeClient + from linode_api4.linode_client import BaseClient class Group: - def __init__(self, client: LinodeClient): + def __init__(self, client: BaseClient): self.client = client diff --git a/linode_api4/groups/monitor.py b/linode_api4/groups/monitor.py index 14b5617c4..2dbfd2285 100644 --- a/linode_api4/groups/monitor.py +++ b/linode_api4/groups/monitor.py @@ -3,9 +3,7 @@ ] from typing import Any, Optional -from linode_api4 import ( - PaginatedList, -) +from linode_api4 import PaginatedList from linode_api4.errors import UnexpectedResponseError from linode_api4.groups import Group from linode_api4.objects import ( diff --git a/linode_api4/groups/monitor_api.py b/linode_api4/groups/monitor_api.py new file mode 100644 index 000000000..48e2b2c30 --- /dev/null +++ b/linode_api4/groups/monitor_api.py @@ -0,0 +1,59 @@ +__all__ = [ + "MetricsGroup", +] + +from typing import Any, Dict, List, Optional, Union + +from linode_api4 import drop_null_keys +from linode_api4.groups import Group +from linode_api4.objects.base import _flatten_request_body_recursive +from linode_api4.objects.monitor_api import EntityMetricOptions, EntityMetrics + + +class MetricsGroup(Group): + """ + Encapsulates Monitor-related methods of the :any:`MonitorClient`. + + This group contains all features related to metrics in the API monitor-api. + """ + + def fetch_metrics( + self, + service_type: str, + entity_ids: list, + metrics: List[Union[EntityMetricOptions, Dict[str, Any]]], + **kwargs, + ) -> Optional[EntityMetrics]: + """ + Returns metrics information for the individual entities within a specific service type. + + API documentation: https://techdocs.akamai.com/linode-api/reference/post-read-metric + + :param service_type: The service being monitored. + Currently, only the Managed Databases (dbaas) service type is supported. + :type service_type: str + + :param entity_ids: The id for each individual entity from a service_type. + :type entity_ids: list + + :param metrics: A list of metric objects, each specifying a metric name and its corresponding aggregation function. + :type metrics: list of EntityMetricOptions or Dict[str, Any] + + :param kwargs: Any other arguments accepted by the api. Please refer to the API documentation for full info. + + :returns: Service metrics requested. 
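
A caller-side sketch for fetch_metrics; the `metrics` attribute on
MonitorClient, its default base URL, and the EntityMetricOptions field names
are assumptions for illustration, since none of them is spelled out in this
hunk:

    from linode_api4 import MonitorClient
    from linode_api4.objects.monitor_api import EntityMetricOptions

    monitor = MonitorClient("my-access-token")

    # Metric name and aggregation function come from the service's metric
    # definitions (e.g. cpu_usage in the dbaas fixtures); field names assumed.
    result = monitor.metrics.fetch_metrics(
        "dbaas",
        entity_ids=[123],
        metrics=[EntityMetricOptions(name="cpu_usage", aggregate_function="avg")],
    )
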
+ :rtype: EntityMetrics or None + """ + params = { + "entity_ids": entity_ids, + "metrics": metrics, + } + + params.update(kwargs) + + result = self.client.post( + f"/monitor/services/{service_type}/metrics", + data=drop_null_keys(_flatten_request_body_recursive(params)), + ) + + return EntityMetrics.from_json(result) diff --git a/linode_api4/linode_client.py b/linode_api4/linode_client.py index e71f1563e..d1e35761e 100644 --- a/linode_api4/linode_client.py +++ b/linode_api4/linode_client.py @@ -19,6 +19,7 @@ LinodeGroup, LKEGroup, LongviewGroup, + MetricsGroup, MonitorGroup, NetworkingGroup, NodeBalancerGroup, @@ -51,11 +52,48 @@ def get_backoff_time(self): return self.backoff_factor -class LinodeClient: +class BaseClient: + """ + The base class for a client. + + :param token: The authentication token to use for communication with the + API. Can be either a Personal Access Token or an OAuth Token. + :type token: str + :param base_url: The base URL for API requests. Generally, you shouldn't + change this. + :type base_url: str + :param user_agent: What to append to the User Agent of all requests made + by this client. Setting this allows Linode's internal + monitoring applications to track the usage of your + application. Setting this is not necessary, but some + applications may desire this behavior. + :type user_agent: str + :param page_size: The default size to request pages at. If not given, + the API's default page size is used. Valid values + can be found in the API docs, but at time of writing + are between 25 and 500. + :type page_size: int + :param retry: Whether API requests should automatically be retries on known + intermittent responses. + :type retry: bool + :param retry_rate_limit_interval: The amount of time to wait between HTTP request + retries. + :type retry_rate_limit_interval: Union[float, int] + :param retry_max: The number of request retries that should be attempted before + raising an API error. + :type retry_max: int + :type retry_statuses: List of int + :param retry_statuses: Additional HTTP response statuses to retry on. + By default, the client will retry on 408, 429, and 502 + responses. + :param ca_path: The path to a CA file to use for API requests in this client. + :type ca_path: str + """ + def __init__( self, token, - base_url="https://api.linode.com/v4", + base_url, user_agent=None, page_size=None, retry=True, @@ -64,42 +102,6 @@ def __init__( retry_statuses=None, ca_path=None, ): - """ - The main interface to the Linode API. - - :param token: The authentication token to use for communication with the - API. Can be either a Personal Access Token or an OAuth Token. - :type token: str - :param base_url: The base URL for API requests. Generally, you shouldn't - change this. - :type base_url: str - :param user_agent: What to append to the User Agent of all requests made - by this client. Setting this allows Linode's internal - monitoring applications to track the usage of your - application. Setting this is not necessary, but some - applications may desire this behavior. - :type user_agent: str - :param page_size: The default size to request pages at. If not given, - the API's default page size is used. Valid values - can be found in the API docs, but at time of writing - are between 25 and 500. - :type page_size: int - :param retry: Whether API requests should automatically be retries on known - intermittent responses. - :type retry: bool - :param retry_rate_limit_interval: The amount of time to wait between HTTP request - retries. 
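
These retry knobs are ordinary constructor arguments; for example, widening
the retried status set (the extra status and interval values here are
arbitrary):

    from linode_api4 import LinodeClient

    client = LinodeClient(
        "my-access-token",
        retry_max=3,
        retry_rate_limit_interval=2.0,
        retry_statuses=[503],  # retried on top of the default 408/429/502
    )
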
- :type retry_rate_limit_interval: Union[float, int] - :param retry_max: The number of request retries that should be attempted before - raising an API error. - :type retry_max: int - :type retry_statuses: List of int - :param retry_statuses: Additional HTTP response statuses to retry on. - By default, the client will retry on 408, 429, and 502 - responses. - :param ca_path: The path to a CA file to use for API requests in this client. - :type ca_path: str - """ self.base_url = base_url self._add_user_agent = user_agent self.token = token @@ -138,72 +140,6 @@ def __init__( self.session.mount("http://", retry_adapter) self.session.mount("https://", retry_adapter) - #: Access methods related to Linodes - see :any:`LinodeGroup` for - #: more information - self.linode = LinodeGroup(self) - - #: Access methods related to your user - see :any:`ProfileGroup` for - #: more information - self.profile = ProfileGroup(self) - - #: Access methods related to your account - see :any:`AccountGroup` for - #: more information - self.account = AccountGroup(self) - - #: Access methods related to networking on your account - see - #: :any:`NetworkingGroup` for more information - self.networking = NetworkingGroup(self) - - #: Access methods related to support - see :any:`SupportGroup` for more - #: information - self.support = SupportGroup(self) - - #: Access information related to the Longview service - see - #: :any:`LongviewGroup` for more information - self.longview = LongviewGroup(self) - - #: Access methods related to Object Storage - see :any:`ObjectStorageGroup` - #: for more information - self.object_storage = ObjectStorageGroup(self) - - #: Access methods related to LKE - see :any:`LKEGroup` for more information. - self.lke = LKEGroup(self) - - #: Access methods related to Managed Databases - see :any:`DatabaseGroup` for more information. - self.database = DatabaseGroup(self) - - #: Access methods related to NodeBalancers - see :any:`NodeBalancerGroup` for more information. - self.nodebalancers = NodeBalancerGroup(self) - - #: Access methods related to Domains - see :any:`DomainGroup` for more information. - self.domains = DomainGroup(self) - - #: Access methods related to Tags - See :any:`TagGroup` for more information. - self.tags = TagGroup(self) - - #: Access methods related to Volumes - See :any:`VolumeGroup` for more information. - self.volumes = VolumeGroup(self) - - #: Access methods related to Regions - See :any:`RegionGroup` for more information. - self.regions = RegionGroup(self) - - #: Access methods related to Images - See :any:`ImageGroup` for more information. - self.images = ImageGroup(self) - - #: Access methods related to VPCs - See :any:`VPCGroup` for more information. - self.vpcs = VPCGroup(self) - - #: Access methods related to Event polling - See :any:`PollingGroup` for more information. - self.polling = PollingGroup(self) - - #: Access methods related to Beta Program - See :any:`BetaProgramGroup` for more information. - self.beta = BetaProgramGroup(self) - - #: Access methods related to VM placement - See :any:`PlacementAPIGroup` for more information. 
-        self.placement = PlacementAPIGroup(self)
-
-        self.monitor = MonitorGroup(self)
-
     @property
     def _user_agent(self):
         return "{}python-linode_api4/{} {}".format(
@@ -367,6 +303,164 @@ def __setattr__(self, key, value):
 
         super().__setattr__(key, value)
 
+    # helper functions
+    def _get_and_filter(
+        self,
+        obj_type,
+        *filters,
+        endpoint=None,
+        parent_id=None,
+    ):
+        parsed_filters = None
+        if filters:
+            if len(filters) > 1:
+                parsed_filters = and_(
+                    *filters
+                ).dct  # pylint: disable=no-value-for-parameter
+            else:
+                parsed_filters = filters[0].dct
+
+        # Use specified endpoint
+        if endpoint:
+            return self._get_objects(
+                endpoint, obj_type, parent_id=parent_id, filters=parsed_filters
+            )
+        else:
+            return self._get_objects(
+                obj_type.api_list(),
+                obj_type,
+                parent_id=parent_id,
+                filters=parsed_filters,
+            )
+
+
+class LinodeClient(BaseClient):
+    def __init__(
+        self,
+        token,
+        base_url="https://api.linode.com/v4",
+        user_agent=None,
+        page_size=None,
+        retry=True,
+        retry_rate_limit_interval=1.0,
+        retry_max=5,
+        retry_statuses=None,
+        ca_path=None,
+    ):
+        """
+        The main interface to the Linode API.
+
+        :param token: The authentication token to use for communication with the
+                      API. Can be either a Personal Access Token or an OAuth Token.
+        :type token: str
+        :param base_url: The base URL for API requests. Generally, you shouldn't
+                         change this.
+        :type base_url: str
+        :param user_agent: What to append to the User Agent of all requests made
+                           by this client. Setting this allows Linode's internal
+                           monitoring applications to track the usage of your
+                           application. Setting this is not necessary, but some
+                           applications may desire this behavior.
+        :type user_agent: str
+        :param page_size: The default size to request pages at. If not given,
+                          the API's default page size is used. Valid values
+                          can be found in the API docs, but at time of writing
+                          are between 25 and 500.
+        :type page_size: int
+        :param retry: Whether API requests should automatically be retried on known
+                      intermittent responses.
+        :type retry: bool
+        :param retry_rate_limit_interval: The amount of time to wait between HTTP request
+                                          retries.
+        :type retry_rate_limit_interval: Union[float, int]
+        :param retry_max: The number of request retries that should be attempted before
+                          raising an API error.
+        :type retry_max: int
+        :type retry_statuses: List of int
+        :param retry_statuses: Additional HTTP response statuses to retry on.
+                               By default, the client will retry on 408, 429, and 502
+                               responses.
+        :param ca_path: The path to a CA file to use for API requests in this client.
+        :type ca_path: str
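+
+        Example (a minimal sketch; the token string is a placeholder)::
+
+            client = LinodeClient("my-personal-access-token")
+            instances = client.linode.instances()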
+        """
+        #: Access methods related to Linodes - see :any:`LinodeGroup` for
+        #: more information
+        self.linode = LinodeGroup(self)
+
+        #: Access methods related to your user - see :any:`ProfileGroup` for
+        #: more information
+        self.profile = ProfileGroup(self)
+
+        #: Access methods related to your account - see :any:`AccountGroup` for
+        #: more information
+        self.account = AccountGroup(self)
+
+        #: Access methods related to networking on your account - see
+        #: :any:`NetworkingGroup` for more information
+        self.networking = NetworkingGroup(self)
+
+        #: Access methods related to support - see :any:`SupportGroup` for more
+        #: information
+        self.support = SupportGroup(self)
+
+        #: Access information related to the Longview service - see
+        #: :any:`LongviewGroup` for more information
+        self.longview = LongviewGroup(self)
+
+        #: Access methods related to Object Storage - see :any:`ObjectStorageGroup`
+        #: for more information
+        self.object_storage = ObjectStorageGroup(self)
+
+        #: Access methods related to LKE - see :any:`LKEGroup` for more information.
+        self.lke = LKEGroup(self)
+
+        #: Access methods related to Managed Databases - see :any:`DatabaseGroup` for more information.
+        self.database = DatabaseGroup(self)
+
+        #: Access methods related to NodeBalancers - see :any:`NodeBalancerGroup` for more information.
+        self.nodebalancers = NodeBalancerGroup(self)
+
+        #: Access methods related to Domains - see :any:`DomainGroup` for more information.
+        self.domains = DomainGroup(self)
+
+        #: Access methods related to Tags - See :any:`TagGroup` for more information.
+        self.tags = TagGroup(self)
+
+        #: Access methods related to Volumes - See :any:`VolumeGroup` for more information.
+        self.volumes = VolumeGroup(self)
+
+        #: Access methods related to Regions - See :any:`RegionGroup` for more information.
+        self.regions = RegionGroup(self)
+
+        #: Access methods related to Images - See :any:`ImageGroup` for more information.
+        self.images = ImageGroup(self)
+
+        #: Access methods related to VPCs - See :any:`VPCGroup` for more information.
+        self.vpcs = VPCGroup(self)
+
+        #: Access methods related to Event polling - See :any:`PollingGroup` for more information.
+        self.polling = PollingGroup(self)
+
+        #: Access methods related to Beta Program - See :any:`BetaProgramGroup` for more information.
+        self.beta = BetaProgramGroup(self)
+
+        #: Access methods related to VM placement - See :any:`PlacementAPIGroup` for more information.
+        self.placement = PlacementAPIGroup(self)
+
+        self.monitor = MonitorGroup(self)
+
+        super().__init__(
+            token=token,
+            base_url=base_url,
+            user_agent=user_agent,
+            page_size=page_size,
+            retry=retry,
+            retry_rate_limit_interval=retry_rate_limit_interval,
+            retry_max=retry_max,
+            retry_statuses=retry_statuses,
+            ca_path=ca_path,
+        )
+
     def image_create(self, disk, label=None, description=None, tags=None):
         """
         .. note:: This method is an alias to maintain backwards compatibility.
@@ -457,32 +551,59 @@ def volume_create(self, label, region=None, linode=None, size=20, **kwargs):
             label, region=region, linode=linode, size=size, **kwargs
         )
 
-    # helper functions
-    def _get_and_filter(
+
+class MonitorClient(BaseClient):
+    """
+    The main interface to the Monitor API.
+
+    :param token: The authentication Personal Access Token to use for
+                  communication with the API. You may want to generate one using
+                  the LinodeClient.
For example: + linode_client.monitor.create_token( + service_type="dbaas", entity_ids=[entity_id] + ) + :type token: str + :param base_url: The base URL for monitor API requests. Generally, you shouldn't + change this. + :type base_url: str + :param user_agent: What to append to the User Agent of all requests made + by this client. Setting this allows Linode's internal + monitoring applications to track the usage of your + application. Setting this is not necessary, but some + applications may desire this behavior. + :type user_agent: str + :param page_size: The default size to request pages at. If not given, + the API's default page size is used. Valid values + can be found in the API docs. + :type page_size: int + :param ca_path: The path to a CA file to use for API requests in this client. + :type ca_path: str + """ + + def __init__( self, - obj_type, - *filters, - endpoint=None, - parent_id=None, + token, + base_url="https://monitor-api.linode.com/v2beta", + user_agent=None, + page_size=None, + ca_path=None, + retry=True, + retry_rate_limit_interval=1.0, + retry_max=5, + retry_statuses=None, ): - parsed_filters = None - if filters: - if len(filters) > 1: - parsed_filters = and_( - *filters - ).dct # pylint: disable=no-value-for-parameter - else: - parsed_filters = filters[0].dct - - # Use sepcified endpoint - if endpoint: - return self._get_objects( - endpoint, obj_type, parent_id=parent_id, filters=parsed_filters - ) - else: - return self._get_objects( - obj_type.api_list(), - obj_type, - parent_id=parent_id, - filters=parsed_filters, - ) + #: Access methods related to your monitor metrics - see :any:`MetricsGroup` for + #: more information + self.metrics = MetricsGroup(self) + + super().__init__( + token=token, + base_url=base_url, + user_agent=user_agent, + page_size=page_size, + retry=retry, + retry_rate_limit_interval=retry_rate_limit_interval, + retry_max=retry_max, + retry_statuses=retry_statuses, + ca_path=ca_path, + ) diff --git a/linode_api4/objects/__init__.py b/linode_api4/objects/__init__.py index 7f1542d2a..c847024d8 100644 --- a/linode_api4/objects/__init__.py +++ b/linode_api4/objects/__init__.py @@ -22,3 +22,4 @@ from .beta import * from .placement import * from .monitor import * +from .monitor_api import * diff --git a/linode_api4/objects/monitor.py b/linode_api4/objects/monitor.py index ae3936ee7..ed6ce79a5 100644 --- a/linode_api4/objects/monitor.py +++ b/linode_api4/objects/monitor.py @@ -3,11 +3,13 @@ "MonitorMetricsDefinition", "MonitorService", "MonitorServiceToken", + "AggregateFunction", ] from dataclasses import dataclass, field from typing import List, Optional -from linode_api4.objects import Base, JSONObject, Property, StrEnum +from linode_api4.objects.base import Base, Property +from linode_api4.objects.serializable import JSONObject, StrEnum class AggregateFunction(StrEnum): diff --git a/linode_api4/objects/monitor_api.py b/linode_api4/objects/monitor_api.py new file mode 100644 index 000000000..c3496668c --- /dev/null +++ b/linode_api4/objects/monitor_api.py @@ -0,0 +1,44 @@ +__all__ = [ + "EntityMetrics", + "EntityMetricsData", + "EntityMetricsDataResult", + "EntityMetricsStats", + "EntityMetricOptions", +] +from dataclasses import dataclass, field +from typing import List, Optional + +from linode_api4.objects.monitor import AggregateFunction +from linode_api4.objects.serializable import JSONObject + + +@dataclass +class EntityMetricsStats(JSONObject): + executionTimeMsec: int = 0 + seriesFetched: str = "" + + +@dataclass +class 
EntityMetricsDataResult(JSONObject): + metric: dict = field(default_factory=dict) + values: list = field(default_factory=list) + + +@dataclass +class EntityMetricsData(JSONObject): + result: Optional[List[EntityMetricsDataResult]] = None + resultType: str = "" + + +@dataclass +class EntityMetrics(JSONObject): + data: Optional[EntityMetricsData] = None + isPartial: bool = False + stats: Optional[EntityMetricsStats] = None + status: str = "" + + +@dataclass +class EntityMetricOptions(JSONObject): + name: str = "" + aggregate_function: AggregateFunction = "" diff --git a/test/fixtures/monitor_services_dbaas_metrics.json b/test/fixtures/monitor_services_dbaas_metrics.json new file mode 100644 index 000000000..67657cb78 --- /dev/null +++ b/test/fixtures/monitor_services_dbaas_metrics.json @@ -0,0 +1,47 @@ +{ + "data": { + "result": [ + { + "metric": { + "entity_id": 13316, + "metric_name": "avg_read_iops", + "node_id": "primary-9" + }, + "values": [ + [ + 1728996500, + "90.55555555555556" + ], + [ + 1729043400, + "14890.583333333334" + ] + ] + }, + { + "metric": { + "entity_id": 13217, + "metric_name": "avg_cpu_usage", + "node_id": "primary-0" + }, + "values": [ + [ + 1728996500, + "12.45" + ], + [ + 1729043400, + "18.67" + ] + ] + } + ], + "resultType": "matrix" + }, + "isPartial": false, + "stats": { + "executionTimeMsec": 21, + "seriesFetched": "2" + }, + "status": "success" +} \ No newline at end of file diff --git a/test/integration/conftest.py b/test/integration/conftest.py index 8c7d44a57..0a0566775 100644 --- a/test/integration/conftest.py +++ b/test/integration/conftest.py @@ -5,15 +5,21 @@ from test.integration.helpers import ( get_test_label, send_request_when_resource_available, + wait_for_condition, ) +from test.integration.models.database.helpers import get_db_engine_id from typing import Optional, Set import pytest import requests from requests.exceptions import ConnectionError, RequestException -from linode_api4 import PlacementGroupPolicy, PlacementGroupType -from linode_api4.linode_client import LinodeClient +from linode_api4 import ( + PlacementGroupPolicy, + PlacementGroupType, + PostgreSQLDatabase, +) +from linode_api4.linode_client import LinodeClient, MonitorClient from linode_api4.objects import Region ENV_TOKEN_NAME = "LINODE_TOKEN" @@ -521,3 +527,68 @@ def linode_for_vlan_tests(test_linode_client, e2e_test_firewall): yield linode_instance linode_instance.delete() + + +@pytest.fixture(scope="session") +def test_create_postgres_db(test_linode_client): + client = test_linode_client + label = get_test_label() + "-postgresqldb" + region = "us-ord" + engine_id = get_db_engine_id(client, "postgresql") + dbtype = "g6-standard-1" + + db = client.database.postgresql_create( + label=label, + region=region, + engine=engine_id, + ltype=dbtype, + cluster_size=None, + ) + + def get_db_status(): + return db.status == "active" + + # TAKES 15-30 MINUTES TO FULLY PROVISION DB + wait_for_condition(60, 2000, get_db_status) + + yield db + + send_request_when_resource_available(300, db.delete) + + +@pytest.fixture(scope="session") +def get_monitor_token_for_db_entities(test_linode_client): + client = test_linode_client + + dbs = client.database.postgresql_instances() + + if len(dbs) < 1: + db_id = test_create_postgres_db.id + else: + db_id = dbs[0].id + + region = client.load(PostgreSQLDatabase, db_id).region + dbs = client.database.instances() + + # only collect entity_ids in the same region + entity_ids = [db.id for db in dbs if db.region == region] + + # create token for the particular 
service + token = client.monitor.create_token( + service_type="dbaas", entity_ids=entity_ids + ) + + yield token, entity_ids + + +@pytest.fixture(scope="session") +def test_monitor_client(get_monitor_token_for_db_entities): + api_ca_file = get_api_ca_file() + token, entity_ids = get_monitor_token_for_db_entities + + client = MonitorClient( + token.token, + ca_path=api_ca_file, + ) + + return client, entity_ids diff --git a/test/integration/models/monitor_api/test_monitor_api.py b/test/integration/models/monitor_api/test_monitor_api.py new file mode 100644 index 000000000..842a8c420 --- /dev/null +++ b/test/integration/models/monitor_api/test_monitor_api.py @@ -0,0 +1,12 @@ +def test_monitor_api_fetch_dbaas_metrics(test_monitor_client): + client, entity_ids = test_monitor_client + + metrics = client.metrics.fetch_metrics( + "dbaas", + entity_ids=entity_ids, + metrics=[{"name": "read_iops", "aggregate_function": "avg"}], + relative_time_duration={"unit": "hr", "value": 1}, + ) + + assert metrics.status == "success" + assert len(metrics.data.result) > 0 diff --git a/test/unit/base.py b/test/unit/base.py index e143f8f64..bc0ec2f08 100644 --- a/test/unit/base.py +++ b/test/unit/base.py @@ -4,7 +4,7 @@ from mock import patch -from linode_api4 import LinodeClient +from linode_api4 import LinodeClient, MonitorClient FIXTURES = TestFixtures() @@ -202,3 +202,29 @@ def mock_delete(self): mocked requests """ return MethodMock("delete", {}) + + +class MonitorClientBaseCase(TestCase): + def setUp(self): + self.client = MonitorClient("testing", base_url="/") + + self.get_patch = patch( + "linode_api4.linode_client.requests.Session.get", + side_effect=mock_get, + ) + self.get_patch.start() + + def tearDown(self): + self.get_patch.stop() + + def mock_post(self, return_dct): + """ + Returns a MethodMock mocking a POST. This should be used in a with + statement. 
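+
+        Example (a sketch mirroring the Monitor API unit tests below; the
+        URL and arguments are illustrative)::
+
+            url = "/monitor/services/dbaas/metrics"
+            with self.mock_post(url) as m:
+                self.client.metrics.fetch_metrics(
+                    "dbaas", entity_ids=[123], metrics=[]
+                )
+            assert m.call_url == url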
+ + :param return_dct: The JSON that should be returned from this POST + + :returns: A MethodMock object who will capture the parameters of the + mocked requests + """ + return MethodMock("post", return_dct) diff --git a/test/unit/groups/monitor_api_test.py b/test/unit/groups/monitor_api_test.py new file mode 100644 index 000000000..c34db068f --- /dev/null +++ b/test/unit/groups/monitor_api_test.py @@ -0,0 +1,52 @@ +from test.unit.base import MonitorClientBaseCase + +from linode_api4.objects import AggregateFunction, EntityMetricOptions + + +class MonitorAPITest(MonitorClientBaseCase): + """ + Tests methods of the Monitor API group + """ + + def test_fetch_metrics(self): + service_type = "dbaas" + url = f"/monitor/services/{service_type}/metrics" + with self.mock_post(url) as m: + metrics = self.client.metrics.fetch_metrics( + service_type, + entity_ids=[13217, 13316], + metrics=[ + EntityMetricOptions( + name="avg_read_iops", + aggregate_function=AggregateFunction("avg"), + ), + {"name": "avg_cpu_usage", "aggregate_function": "avg"}, + ], + relative_time_duration={"unit": "hr", "value": 1}, + ) + + # assert call data + assert m.call_url == url + assert m.call_data == { + "entity_ids": [13217, 13316], + "metrics": [ + {"name": "avg_read_iops", "aggregate_function": "avg"}, + {"name": "avg_cpu_usage", "aggregate_function": "avg"}, + ], + "relative_time_duration": {"unit": "hr", "value": 1}, + } + + # assert the metrics data + metric_data = metrics.data.result[0] + + assert metrics.data.resultType == "matrix" + assert metric_data.metric["entity_id"] == 13316 + assert metric_data.metric["metric_name"] == "avg_read_iops" + assert metric_data.metric["node_id"] == "primary-9" + assert metric_data.values[0][0] == 1728996500 + assert metric_data.values[0][1] == "90.55555555555556" + + assert metrics.status == "success" + assert metrics.stats.executionTimeMsec == 21 + assert metrics.stats.seriesFetched == "2" + assert not metrics.isPartial From e18c8f1d775df1a635b6eee7fbf5f26909aeefe3 Mon Sep 17 00:00:00 2001 From: Erik Zilber Date: Fri, 15 Aug 2025 11:43:15 -0400 Subject: [PATCH 24/34] Project: Host Maintenance Policy (#582) * Added support for Host/VM Maintenance (#532) * Added support for changes in Account Events, Maintenance, and Settings * Added support for changes in Instance and added Maintenance group * Add docstring and fix imports * Updated maintenance_policy_id to maintenance_policy (#564) * fix * Fix failing unit tests * fix * integration test * address_PR_comments * Added v4beta notices (#578) * Added missing doc link * Added maintenance to LinodeClient * Addressed PR comments --------- Co-authored-by: vshanthe Co-authored-by: Vinay <143587840+vshanthe@users.noreply.github.com> --- linode_api4/groups/__init__.py | 1 + linode_api4/groups/account.py | 2 +- linode_api4/groups/linode.py | 7 ++ linode_api4/groups/maintenance.py | 25 +++++ linode_api4/linode_client.py | 5 + linode_api4/objects/account.py | 11 +- linode_api4/objects/linode.py | 3 + test/fixtures/account_events_123.json | 56 +++++----- test/fixtures/account_maintenance.json | 58 +++++++--- test/fixtures/account_settings.json | 3 +- test/fixtures/linode_instances.json | 6 +- test/fixtures/maintenance_policies.json | 28 +++++ .../models/account/test_account.py | 25 +++++ test/integration/models/linode/test_linode.py | 44 ++++++++ .../models/maintenance/test_maintenance.py | 12 ++ test/unit/groups/linode_test.py | 14 +++ test/unit/linode_client_test.py | 103 +++++++++++++++++- test/unit/objects/account_test.py | 47 +++++++- 
test/unit/objects/linode_test.py | 3 + 19 files changed, 397 insertions(+), 56 deletions(-) create mode 100644 linode_api4/groups/maintenance.py create mode 100644 test/fixtures/maintenance_policies.json create mode 100644 test/integration/models/maintenance/test_maintenance.py diff --git a/linode_api4/groups/__init__.py b/linode_api4/groups/__init__.py index 4096cd21c..6f87eeb65 100644 --- a/linode_api4/groups/__init__.py +++ b/linode_api4/groups/__init__.py @@ -10,6 +10,7 @@ from .lke import * from .lke_tier import * from .longview import * +from .maintenance import * from .monitor import * from .monitor_api import * from .networking import * diff --git a/linode_api4/groups/account.py b/linode_api4/groups/account.py index 564e55eea..6f8c6528e 100644 --- a/linode_api4/groups/account.py +++ b/linode_api4/groups/account.py @@ -201,7 +201,7 @@ def maintenance(self): """ Returns a collection of Maintenance objects for any entity a user has permissions to view. Cancelled Maintenance objects are not returned. - API Documentation: https://techdocs.akamai.com/linode-api/reference/get-account-logins + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-maintenance :returns: A list of Maintenance objects on this account. :rtype: List of Maintenance objects as MappedObjects diff --git a/linode_api4/groups/linode.py b/linode_api4/groups/linode.py index 48f0d43b6..4c4dbfdbf 100644 --- a/linode_api4/groups/linode.py +++ b/linode_api4/groups/linode.py @@ -153,6 +153,7 @@ def instance_create( int, ] ] = None, + maintenance_policy: Optional[str] = None, **kwargs, ): """ @@ -296,6 +297,11 @@ def instance_create( :type interfaces: list[ConfigInterface] or list[dict[str, Any]] :param placement_group: A Placement Group to create this Linode under. :type placement_group: Union[InstancePlacementGroupAssignment, PlacementGroup, Dict[str, Any], int] + :param maintenance_policy: The slug of the maintenance policy to apply during maintenance. + If not provided, the default policy (linode/migrate) will be applied. + NOTE: This field is in beta and may only + function if base_url is set to `https://api.linode.com/v4beta`. + :type maintenance_policy: str :returns: A new Instance object, or a tuple containing the new Instance and the generated password. @@ -327,6 +333,7 @@ def instance_create( "firewall_id": firewall, "backup_id": backup, "stackscript_id": stackscript, + "maintenance_policy": maintenance_policy, # Special cases "disk_encryption": ( str(disk_encryption) if disk_encryption else None diff --git a/linode_api4/groups/maintenance.py b/linode_api4/groups/maintenance.py new file mode 100644 index 000000000..f41780dfb --- /dev/null +++ b/linode_api4/groups/maintenance.py @@ -0,0 +1,25 @@ +from linode_api4.groups import Group +from linode_api4.objects import MappedObject + + +class MaintenanceGroup(Group): + """ + Collections related to Maintenance. + """ + + def maintenance_policies(self): + """ + .. note:: This endpoint is in beta. This will only function if base_url is set to `https://api.linode.com/v4beta`. 
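+
+        A minimal sketch of opting in to the beta base URL (the token is a
+        placeholder)::
+
+            client = LinodeClient(token, base_url="https://api.linode.com/v4beta")
+            policies = client.maintenance.maintenance_policies()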
+ + Returns a collection of MaintenancePolicy objects representing + available maintenance policies that can be applied to Linodes + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-policies + + :returns: A list of Maintenance Policies that can be applied to Linodes + :rtype: List of MaintenancePolicy objects as MappedObjects + """ + + result = self.client.get("/maintenance/policies", model=self) + + return [MappedObject(**r) for r in result["data"]] diff --git a/linode_api4/linode_client.py b/linode_api4/linode_client.py index d1e35761e..1d9f0bba4 100644 --- a/linode_api4/linode_client.py +++ b/linode_api4/linode_client.py @@ -19,6 +19,7 @@ LinodeGroup, LKEGroup, LongviewGroup, + MaintenanceGroup, MetricsGroup, MonitorGroup, NetworkingGroup, @@ -399,6 +400,10 @@ def __init__( #: :any:`NetworkingGroup` for more information self.networking = NetworkingGroup(self) + #: Access methods related to maintenance on your account - see + #: :any:`MaintenanceGroup` for more information + self.maintenance = MaintenanceGroup(self) + #: Access methods related to support - see :any:`SupportGroup` for more #: information self.support = SupportGroup(self) diff --git a/linode_api4/objects/account.py b/linode_api4/objects/account.py index 836f41522..2ad1b6482 100644 --- a/linode_api4/objects/account.py +++ b/linode_api4/objects/account.py @@ -198,6 +198,9 @@ class AccountSettings(Base): ), "object_storage": Property(), "backups_enabled": Property(mutable=True), + "maintenance_policy": Property( + mutable=True + ), # Note: This field is only available when using v4beta. } @@ -220,12 +223,18 @@ class Event(Base): "user_id": Property(), "username": Property(), "entity": Property(), - "time_remaining": Property(), + "time_remaining": Property(), # Deprecated "rate": Property(), "status": Property(), "duration": Property(), "secondary_entity": Property(), "message": Property(), + "maintenance_policy_set": Property(), # Note: This field is only available when using v4beta. + "description": Property(), + "source": Property(), + "not_before": Property(is_datetime=True), + "start_time": Property(is_datetime=True), + "complete_time": Property(is_datetime=True), } @property diff --git a/linode_api4/objects/linode.py b/linode_api4/objects/linode.py index c70dd7965..2d051fb44 100644 --- a/linode_api4/objects/linode.py +++ b/linode_api4/objects/linode.py @@ -686,6 +686,9 @@ class Instance(Base): "disk_encryption": Property(), "lke_cluster_id": Property(), "capabilities": Property(unordered=True), + "maintenance_policy": Property( + mutable=True + ), # Note: This field is only available when using v4beta. 
} @property diff --git a/test/fixtures/account_events_123.json b/test/fixtures/account_events_123.json index 4c2b7141d..b24156f90 100644 --- a/test/fixtures/account_events_123.json +++ b/test/fixtures/account_events_123.json @@ -1,27 +1,31 @@ { - "action": "ticket_create", - "created": "2018-01-01T00:01:01", - "duration": 300.56, - "entity": { - "id": 11111, - "label": "Problem booting my Linode", - "type": "ticket", - "url": "/v4/support/tickets/11111" - }, - "id": 123, - "message": "None", - "percent_complete": null, - "rate": null, - "read": true, - "secondary_entity": { - "id": "linode/debian9", - "label": "linode1234", - "type": "linode", - "url": "/v4/linode/instances/1234" - }, - "seen": true, - "status": null, - "time_remaining": null, - "username": "exampleUser" - } - \ No newline at end of file + "action": "ticket_create", + "created": "2025-03-25T12:00:00", + "duration": 300.56, + "entity": { + "id": 11111, + "label": "Problem booting my Linode", + "type": "ticket", + "url": "/v4/support/tickets/11111" + }, + "id": 123, + "message": "Ticket created for user issue.", + "percent_complete": null, + "rate": null, + "read": true, + "secondary_entity": { + "id": "linode/debian9", + "label": "linode1234", + "type": "linode", + "url": "/v4/linode/instances/1234" + }, + "seen": true, + "status": "completed", + "username": "exampleUser", + "maintenance_policy_set": "Tentative", + "description": "Scheduled maintenance", + "source": "user", + "not_before": "2025-03-25T12:00:00", + "start_time": "2025-03-25T12:30:00", + "complete_time": "2025-03-25T13:00:00" +} \ No newline at end of file diff --git a/test/fixtures/account_maintenance.json b/test/fixtures/account_maintenance.json index aeeab91e6..30f8ed19e 100644 --- a/test/fixtures/account_maintenance.json +++ b/test/fixtures/account_maintenance.json @@ -1,19 +1,41 @@ { - "data": [ - { - "entity": { - "id": 123, - "label": "demo-linode", - "type": "Linode", - "url": "https://api.linode.com/v4/linode/instances/{linodeId}" - }, - "reason": "This maintenance will allow us to update the BIOS on the host's motherboard.", - "status": "started", - "type": "reboot", - "when": "2020-07-09T00:01:01" - } - ], - "page": 1, - "pages": 1, - "results": 1 -} \ No newline at end of file + "pages": 1, + "page": 1, + "results": 2, + "data": [ + { + "entity": { + "id": 1234, + "label": "Linode #1234", + "type": "linode", + "url": "/linodes/1234" + }, + "reason": "Scheduled upgrade to faster NVMe hardware.", + "type": "linode_migrate", + "maintenance_policy_set": "linode/power_off_on", + "description": "Scheduled Maintenance", + "source": "platform", + "not_before": "2025-03-25T10:00:00Z", + "start_time": "2025-03-25T12:00:00Z", + "complete_time": "2025-03-25T14:00:00Z", + "status": "scheduled" + }, + { + "entity": { + "id": 1234, + "label": "Linode #1234", + "type": "linode", + "url": "/linodes/1234" + }, + "reason": "Pending migration of Linode #1234 to a new host.", + "type": "linode_migrate", + "maintenance_policy_set": "linode/migrate", + "description": "Emergency Maintenance", + "source": "user", + "not_before": "2025-03-26T15:00:00Z", + "start_time": "2025-03-26T15:00:00Z", + "complete_time": "2025-03-26T17:00:00Z", + "status": "in-progress" + } + ] +} diff --git a/test/fixtures/account_settings.json b/test/fixtures/account_settings.json index 77a2fdac3..dda69f1ab 100644 --- a/test/fixtures/account_settings.json +++ b/test/fixtures/account_settings.json @@ -3,5 +3,6 @@ "managed": false, "network_helper": false, "object_storage": "active", - 
"backups_enabled": true + "backups_enabled": true, + "maintenance_policy": "linode/migrate" } diff --git a/test/fixtures/linode_instances.json b/test/fixtures/linode_instances.json index 38a3cf912..452fc354d 100644 --- a/test/fixtures/linode_instances.json +++ b/test/fixtures/linode_instances.json @@ -48,7 +48,8 @@ "label": "test", "placement_group_type": "anti_affinity:local", "placement_group_policy": "strict" - } + }, + "maintenance_policy" : "linode/migrate" }, { "group": "test", @@ -90,7 +91,8 @@ "watchdog_enabled": false, "disk_encryption": "enabled", "lke_cluster_id": 18881, - "placement_group": null + "placement_group": null, + "maintenance_policy" : "linode/power_off_on" } ] } diff --git a/test/fixtures/maintenance_policies.json b/test/fixtures/maintenance_policies.json new file mode 100644 index 000000000..409255a07 --- /dev/null +++ b/test/fixtures/maintenance_policies.json @@ -0,0 +1,28 @@ +{ + "data": [ + { + "slug": "linode/migrate", + "label": "Migrate", + "description": "Migrates the Linode to a new host while it remains fully operational. Recommended for maximizing availability.", + "type": "migrate", + "notification_period_sec": 3600, + "is_default": true + }, + { + "slug": "linode/power_off_on", + "label": "Power Off/Power On", + "description": "Powers off the Linode at the start of the maintenance event and reboots it once the maintenance finishes. Recommended for maximizing performance.", + "type": "power_off_on", + "notification_period_sec": 1800, + "is_default": false + }, + { + "slug": "private/12345", + "label": "Critical Workload - Avoid Migration", + "description": "Custom policy designed to power off and perform maintenance during user-defined windows only.", + "type": "power_off_on", + "notification_period_sec": 7200, + "is_default": false + } + ] +} \ No newline at end of file diff --git a/test/integration/models/account/test_account.py b/test/integration/models/account/test_account.py index 72cd97cda..805f713b6 100644 --- a/test/integration/models/account/test_account.py +++ b/test/integration/models/account/test_account.py @@ -59,6 +59,31 @@ def test_get_account_settings(test_linode_client): assert "longview_subscription" in str(account_settings._raw_json) assert "backups_enabled" in str(account_settings._raw_json) assert "object_storage" in str(account_settings._raw_json) + assert "maintenance_policy" in str(account_settings._raw_json) + + +def test_update_maintenance_policy(test_linode_client): + client = test_linode_client + settings = client.load(AccountSettings(client, ""), "") + + original_policy = settings.maintenance_policy + new_policy = ( + "linode/power_off_on" + if original_policy == "linode/migrate" + else "linode/migrate" + ) + + settings.maintenance_policy = new_policy + settings.save() + + updated = client.load(AccountSettings(client, ""), "") + assert updated.maintenance_policy == new_policy + + settings.maintenance_policy = original_policy + settings.save() + + updated = client.load(AccountSettings(client, ""), "") + assert updated.maintenance_policy == original_policy @pytest.mark.smoke diff --git a/test/integration/models/linode/test_linode.py b/test/integration/models/linode/test_linode.py index 97965f2b9..77af1e218 100644 --- a/test/integration/models/linode/test_linode.py +++ b/test/integration/models/linode/test_linode.py @@ -877,3 +877,47 @@ def test_delete_interface_containing_vpc( # returns true when delete successful assert result + + +def test_create_linode_with_maintenance_policy(test_linode_client): + client = 
test_linode_client
+    region = get_region(client, {"Linodes"}, site_type="core")
+    label = get_test_label()
+
+    policies = client.maintenance.maintenance_policies()
+    assert policies, "No maintenance policies returned from API"
+
+    non_default_policy = next((p for p in policies if not p.is_default), None)
+    assert non_default_policy, "No non-default maintenance policy available"
+
+    linode_instance, password = client.linode.instance_create(
+        "g6-nanode-1",
+        region,
+        image="linode/debian12",
+        label=label + "_with_policy",
+        maintenance_policy_id=non_default_policy.slug,
+    )
+
+    assert linode_instance.id is not None
+    assert linode_instance.label.startswith(label)
+    assert linode_instance.maintenance_policy == non_default_policy.slug
+
+    linode_instance.delete()
+
+
+def test_update_linode_maintenance_policy(create_linode, test_linode_client):
+    client = test_linode_client
+    linode = create_linode
+
+    policies = client.maintenance.maintenance_policies()
+    assert policies, "No maintenance policies returned from API"
+
+    non_default_policy = next((p for p in policies if not p.is_default), None)
+    assert non_default_policy, "No non-default maintenance policy found"
+
+    linode.maintenance_policy_id = non_default_policy.slug
+    result = linode.save()
+
+    linode.invalidate()
+    assert result
+    assert linode.maintenance_policy_id == non_default_policy.slug
diff --git a/test/integration/models/maintenance/test_maintenance.py b/test/integration/models/maintenance/test_maintenance.py
new file mode 100644
index 000000000..509d06cf6
--- /dev/null
+++ b/test/integration/models/maintenance/test_maintenance.py
@@ -0,0 +1,12 @@
+def test_get_maintenance_policies(test_linode_client):
+    client = test_linode_client
+
+    policies = client.maintenance.maintenance_policies()
+
+    assert isinstance(policies, list)
+    assert all(hasattr(p, "slug") for p in policies)
+
+    slugs = [p.slug for p in policies]
+    assert any(
+        slug in slugs for slug in ["linode/migrate", "linode/power_off_on"]
+    )
diff --git a/test/unit/groups/linode_test.py b/test/unit/groups/linode_test.py
index 8112a5d93..8a6697c81 100644
--- a/test/unit/groups/linode_test.py
+++ b/test/unit/groups/linode_test.py
@@ -96,6 +96,20 @@ def test_create_with_placement_group(self):
             m.call_data["placement_group"], {"id": 123, "compliant_only": True}
         )
 
+    def test_create_with_maintenance_policy(self):
+        """
+        Tests that you can create a Linode with a maintenance policy
+        """
+
+        with self.mock_post("linode/instances/123") as m:
+            self.client.linode.instance_create(
+                "g6-nanode-1",
+                "eu-west",
+                maintenance_policy="linode/migrate",
+            )
+
+        self.assertEqual(m.call_data["maintenance_policy"], "linode/migrate")
+
 
 class TypeTest(ClientBaseCase):
     def test_get_types(self):
diff --git a/test/unit/linode_client_test.py b/test/unit/linode_client_test.py
index c79c0a88d..3c33a16f2 100644
--- a/test/unit/linode_client_test.py
+++ b/test/unit/linode_client_test.py
@@ -307,6 +307,59 @@ def get_mock(*params, verify=True, **kwargs):
     assert called
 
 
+class MaintenanceGroupTest(ClientBaseCase):
+    """
+    Tests methods of the MaintenanceGroup
+    """
+
+    def test_maintenance(self):
+        """
+        Tests that maintenance policies can be retrieved
+        """
+        with self.mock_get("/maintenance/policies") as m:
+            result = self.client.maintenance.maintenance_policies()
+
+        self.assertEqual(m.call_url, "/maintenance/policies")
+        self.assertEqual(len(result), 3)
+
+        policy_migrate = result[0]
+        policy_power_off_on = result[1]
+        policy_custom = result[2]
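+
+        # The expected values below mirror test/fixtures/maintenance_policies.json.
+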
self.assertEqual(policy_migrate.slug, "linode/migrate") + self.assertEqual(policy_migrate.label, "Migrate") + self.assertEqual( + policy_migrate.description, + "Migrates the Linode to a new host while it remains fully operational. Recommended for maximizing availability.", + ) + self.assertEqual(policy_migrate.type, "migrate") + self.assertEqual(policy_migrate.notification_period_sec, 3600) + self.assertTrue(policy_migrate.is_default) + + self.assertEqual(policy_power_off_on.slug, "linode/power_off_on") + self.assertEqual(policy_power_off_on.label, "Power Off/Power On") + self.assertEqual( + policy_power_off_on.description, + "Powers off the Linode at the start of the maintenance event and reboots it once the maintenance finishes. Recommended for maximizing performance.", + ) + self.assertEqual(policy_power_off_on.type, "power_off_on") + self.assertEqual(policy_power_off_on.notification_period_sec, 1800) + self.assertFalse(policy_power_off_on.is_default) + + self.assertEqual(policy_custom.slug, "private/12345") + self.assertEqual( + policy_custom.label, "Critical Workload - Avoid Migration" + ) + self.assertEqual( + policy_custom.description, + "Custom policy designed to power off and perform maintenance during user-defined windows only.", + ) + self.assertEqual(policy_custom.type, "power_off_on") + self.assertEqual(policy_custom.notification_period_sec, 7200) + self.assertFalse(policy_custom.is_default) + + class AccountGroupTest(ClientBaseCase): """ Tests methods of the AccountGroup @@ -353,12 +406,56 @@ def test_maintenance(self): """ with self.mock_get("/account/maintenance") as m: result = self.client.account.maintenance() + self.assertEqual(m.call_url, "/account/maintenance") - self.assertEqual(len(result), 1) + self.assertEqual(len(result), 2) + + maintenance_1 = result[0] + maintenance_2 = result[1] + + # First maintenance + self.assertEqual( + maintenance_1.reason, + "Scheduled upgrade to faster NVMe hardware.", + ) + self.assertEqual(maintenance_1.entity.id, 1234) + self.assertEqual(maintenance_1.entity.label, "Linode #1234") + self.assertEqual(maintenance_1.entity.type, "linode") + self.assertEqual(maintenance_1.entity.url, "/linodes/1234") + self.assertEqual( + maintenance_1.maintenance_policy_set, "linode/power_off_on" + ) + self.assertEqual(maintenance_1.description, "Scheduled Maintenance") + self.assertEqual(maintenance_1.source, "platform") + self.assertEqual(maintenance_1.not_before, "2025-03-25T10:00:00Z") + self.assertEqual(maintenance_1.start_time, "2025-03-25T12:00:00Z") + self.assertEqual( + maintenance_1.complete_time, "2025-03-25T14:00:00Z" + ) + self.assertEqual(maintenance_1.status, "scheduled") + self.assertEqual(maintenance_1.type, "linode_migrate") + + # Second maintenance + self.assertEqual( + maintenance_2.reason, + "Pending migration of Linode #1234 to a new host.", + ) + self.assertEqual(maintenance_2.entity.id, 1234) + self.assertEqual(maintenance_2.entity.label, "Linode #1234") + self.assertEqual(maintenance_2.entity.type, "linode") + self.assertEqual(maintenance_2.entity.url, "/linodes/1234") + self.assertEqual( + maintenance_2.maintenance_policy_set, "linode/migrate" + ) + self.assertEqual(maintenance_2.description, "Emergency Maintenance") + self.assertEqual(maintenance_2.source, "user") + self.assertEqual(maintenance_2.not_before, "2025-03-26T15:00:00Z") + self.assertEqual(maintenance_2.start_time, "2025-03-26T15:00:00Z") self.assertEqual( - result[0].reason, - "This maintenance will allow us to update the BIOS on the host's motherboard.", + 
maintenance_2.complete_time, "2025-03-26T17:00:00Z" ) + self.assertEqual(maintenance_2.status, "in-progress") + self.assertEqual(maintenance_2.type, "linode_migrate") def test_notifications(self): """ diff --git a/test/unit/objects/account_test.py b/test/unit/objects/account_test.py index 1f9da98fb..c55587adb 100644 --- a/test/unit/objects/account_test.py +++ b/test/unit/objects/account_test.py @@ -121,6 +121,25 @@ def test_get_account_settings(self): self.assertEqual(settings.network_helper, False) self.assertEqual(settings.object_storage, "active") self.assertEqual(settings.backups_enabled, True) + self.assertEqual(settings.maintenance_policy, "linode/migrate") + + def test_update_account_settings(self): + """ + Tests that account settings can be updated + """ + with self.mock_put("account/settings") as m: + settings = AccountSettings(self.client, False, {}) + + settings.maintenance_policy = "linode/migrate" + settings.save() + + self.assertEqual(m.call_url, "/account/settings") + self.assertEqual( + m.call_data, + { + "maintenance_policy": "linode/migrate", + }, + ) def test_get_event(self): """ @@ -129,20 +148,40 @@ def test_get_event(self): event = Event(self.client, 123, {}) self.assertEqual(event.action, "ticket_create") - self.assertEqual(event.created, datetime(2018, 1, 1, 0, 1, 1)) + self.assertEqual(event.created, datetime(2025, 3, 25, 12, 0, 0)) self.assertEqual(event.duration, 300.56) + self.assertIsNotNone(event.entity) + self.assertEqual(event.entity.id, 11111) + self.assertEqual(event.entity.label, "Problem booting my Linode") + self.assertEqual(event.entity.type, "ticket") + self.assertEqual(event.entity.url, "/v4/support/tickets/11111") + self.assertEqual(event.id, 123) - self.assertEqual(event.message, "None") + self.assertEqual(event.message, "Ticket created for user issue.") self.assertIsNone(event.percent_complete) self.assertIsNone(event.rate) self.assertTrue(event.read) + self.assertIsNotNone(event.secondary_entity) + self.assertEqual(event.secondary_entity.id, "linode/debian9") + self.assertEqual(event.secondary_entity.label, "linode1234") + self.assertEqual(event.secondary_entity.type, "linode") + self.assertEqual( + event.secondary_entity.url, "/v4/linode/instances/1234" + ) + self.assertTrue(event.seen) - self.assertIsNone(event.status) - self.assertIsNone(event.time_remaining) + self.assertEqual(event.status, "completed") self.assertEqual(event.username, "exampleUser") + self.assertEqual(event.maintenance_policy_set, "Tentative") + self.assertEqual(event.description, "Scheduled maintenance") + self.assertEqual(event.source, "user") + self.assertEqual(event.not_before, datetime(2025, 3, 25, 12, 0, 0)) + self.assertEqual(event.start_time, datetime(2025, 3, 25, 12, 30, 0)) + self.assertEqual(event.complete_time, datetime(2025, 3, 25, 13, 0, 0)) + def test_get_invoice(self): """ Tests that an invoice is loaded correctly by ID diff --git a/test/unit/objects/linode_test.py b/test/unit/objects/linode_test.py index 6016d2776..8fa3cdbb3 100644 --- a/test/unit/objects/linode_test.py +++ b/test/unit/objects/linode_test.py @@ -40,6 +40,7 @@ def test_get_linode(self): linode.disk_encryption, InstanceDiskEncryptionType.disabled ) self.assertEqual(linode.lke_cluster_id, None) + self.assertEqual(linode.maintenance_policy, "linode/migrate") json = linode._raw_json self.assertIsNotNone(json) @@ -153,6 +154,7 @@ def test_update_linode(self): linode.label = "NewLinodeLabel" linode.group = "new_group" + linode.maintenance_policy = "linode/power_off_on" linode.save() 
self.assertEqual(m.call_url, "/linode/instances/123") @@ -174,6 +176,7 @@ def test_update_linode(self): "group": "new_group", "tags": ["something"], "watchdog_enabled": True, + "maintenance_policy": "linode/power_off_on", }, ) From 59188246548ecf1691bdaf36a8fc2672da3cbd82 Mon Sep 17 00:00:00 2001 From: Erik Zilber Date: Fri, 15 Aug 2025 14:42:15 -0400 Subject: [PATCH 25/34] Fixed maintenance policy test (#584) * Fixed maintenance policy test * Add note to replace region in GA --- test/integration/models/linode/test_linode.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/integration/models/linode/test_linode.py b/test/integration/models/linode/test_linode.py index 77af1e218..7b3e836d1 100644 --- a/test/integration/models/linode/test_linode.py +++ b/test/integration/models/linode/test_linode.py @@ -881,7 +881,8 @@ def test_delete_interface_containing_vpc( def test_create_linode_with_maintenance_policy(test_linode_client): client = test_linode_client - region = get_region(client, {"Linodes"}, site_type="core") + # TODO: Replace with random region after GA + region = "eu-central" label = get_test_label() policies = client.maintenance.maintenance_policies() From b66a974234e355bd12f564c0b5a44dfd7394d0a7 Mon Sep 17 00:00:00 2001 From: Vinay <143587840+vshanthe@users.noreply.github.com> Date: Mon, 18 Aug 2025 20:32:20 +0530 Subject: [PATCH 26/34] fix (#585) --- test/integration/models/linode/test_linode.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/models/linode/test_linode.py b/test/integration/models/linode/test_linode.py index 7b3e836d1..52d948d26 100644 --- a/test/integration/models/linode/test_linode.py +++ b/test/integration/models/linode/test_linode.py @@ -882,7 +882,7 @@ def test_delete_interface_containing_vpc( def test_create_linode_with_maintenance_policy(test_linode_client): client = test_linode_client # TODO: Replace with random region after GA - region = "eu-central" + region = "ap-south" label = get_test_label() policies = client.maintenance.maintenance_policies() @@ -896,7 +896,7 @@ def test_create_linode_with_maintenance_policy(test_linode_client): region, image="linode/debian12", label=label + "_with_policy", - maintenance_policy_id=non_default_policy.slug, + maintenance_policy=non_default_policy.slug, ) assert linode_instance.id is not None From 4825c2b1c3444da836b69897b977aa09bd985c25 Mon Sep 17 00:00:00 2001 From: Ye Chen <127243817+yec-akamai@users.noreply.github.com> Date: Mon, 18 Aug 2025 11:24:06 -0400 Subject: [PATCH 27/34] Fix monitor-api integration test (#586) * fix test * lint --- test/integration/conftest.py | 4 +++- test/integration/models/monitor_api/test_monitor_api.py | 1 - 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/test/integration/conftest.py b/test/integration/conftest.py index 0a0566775..75f0c8f5d 100644 --- a/test/integration/conftest.py +++ b/test/integration/conftest.py @@ -557,7 +557,9 @@ def get_db_status(): @pytest.fixture(scope="session") -def get_monitor_token_for_db_entities(test_linode_client): +def get_monitor_token_for_db_entities( + test_linode_client, test_create_postgres_db +): client = test_linode_client dbs = client.database.postgresql_instances() diff --git a/test/integration/models/monitor_api/test_monitor_api.py b/test/integration/models/monitor_api/test_monitor_api.py index 842a8c420..d9fd755b3 100644 --- a/test/integration/models/monitor_api/test_monitor_api.py +++ b/test/integration/models/monitor_api/test_monitor_api.py @@ -9,4 +9,3 @@ def 
test_monitor_api_fetch_dbaas_metrics(test_monitor_client): ) assert metrics.status == "success" - assert len(metrics.data.result) > 0 From 0c32726f3ac35dcd4a2dc8fb70e03e9ce0066fe4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Aug 2025 02:30:24 -0400 Subject: [PATCH 28/34] build(deps): bump actions/checkout from 4 to 5 (#579) Bumps [actions/checkout](https://github.com/actions/checkout) from 4 to 5. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: '5' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 4 ++-- .github/workflows/codeql.yml | 2 +- .github/workflows/dependency-review.yml | 2 +- .github/workflows/e2e-test-pr.yml | 4 ++-- .github/workflows/e2e-test.yml | 8 ++++---- .github/workflows/labeler.yml | 2 +- .github/workflows/nightly-smoke-tests.yml | 2 +- .github/workflows/publish-pypi.yaml | 2 +- .github/workflows/release-cross-repo-test.yml | 4 ++-- 9 files changed, 15 insertions(+), 15 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1fd2ad747..d9863f1fd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-latest steps: - name: checkout repo - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: setup python 3 uses: actions/setup-python@v5 @@ -33,7 +33,7 @@ jobs: matrix: python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 7168ea488..527950d61 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -23,7 +23,7 @@ jobs: build-mode: none steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Initialize CodeQL uses: github/codeql-action/init@v3 diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index f2b7117d8..e31dcc975 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout repository' - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: 'Dependency Review' uses: actions/dependency-review-action@v4 with: diff --git a/.github/workflows/e2e-test-pr.yml b/.github/workflows/e2e-test-pr.yml index 31e695aca..2f26c393b 100644 --- a/.github/workflows/e2e-test-pr.yml +++ b/.github/workflows/e2e-test-pr.yml @@ -48,7 +48,7 @@ jobs: # Check out merge commit - name: Checkout PR - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: ref: ${{ inputs.sha }} fetch-depth: 0 @@ -150,7 +150,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: fetch-depth: 0 submodules: 'recursive' diff --git a/.github/workflows/e2e-test.yml b/.github/workflows/e2e-test.yml index 1c4ec8540..3c9c531fd 100644 --- a/.github/workflows/e2e-test.yml +++ b/.github/workflows/e2e-test.yml @@ -57,7 +57,7 @@ jobs: steps: - name: Clone Repository 
with SHA if: ${{ inputs.sha != '' }} - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: fetch-depth: 0 submodules: 'recursive' @@ -65,7 +65,7 @@ jobs: - name: Clone Repository without SHA if: ${{ inputs.sha == '' }} - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: fetch-depth: 0 submodules: 'recursive' @@ -111,7 +111,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: fetch-depth: 0 submodules: 'recursive' @@ -178,7 +178,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: fetch-depth: 0 submodules: 'recursive' diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 30bcb1956..7a3ee5f37 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -18,7 +18,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Run Labeler uses: crazy-max/ghaction-github-labeler@24d110aa46a59976b8a7f35518cb7f14f434c916 diff --git a/.github/workflows/nightly-smoke-tests.yml b/.github/workflows/nightly-smoke-tests.yml index dc41e1600..2d7f9543a 100644 --- a/.github/workflows/nightly-smoke-tests.yml +++ b/.github/workflows/nightly-smoke-tests.yml @@ -19,7 +19,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: ref: dev diff --git a/.github/workflows/publish-pypi.yaml b/.github/workflows/publish-pypi.yaml index 027ac5298..a921010ca 100644 --- a/.github/workflows/publish-pypi.yaml +++ b/.github/workflows/publish-pypi.yaml @@ -12,7 +12,7 @@ jobs: environment: pypi-release steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Setup Python uses: actions/setup-python@v5 diff --git a/.github/workflows/release-cross-repo-test.yml b/.github/workflows/release-cross-repo-test.yml index 052eaffb4..e6ca88cd3 100644 --- a/.github/workflows/release-cross-repo-test.yml +++ b/.github/workflows/release-cross-repo-test.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout linode_api4 repository - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: fetch-depth: 0 submodules: 'recursive' @@ -30,7 +30,7 @@ jobs: python-version: '3.10' - name: Checkout ansible repo - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: repository: linode/ansible_linode path: .ansible/collections/ansible_collections/linode/cloud From f2055c68e703de1f124c2f80cfce8ea2fc897c8c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Aug 2025 02:31:32 -0400 Subject: [PATCH 29/34] build(deps): bump actions/download-artifact from 4 to 5 (#580) Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 4 to 5. - [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/download-artifact dependency-version: '5' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 .github/workflows/e2e-test.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/e2e-test.yml b/.github/workflows/e2e-test.yml
index 3c9c531fd..7a94a1e24 100644
--- a/.github/workflows/e2e-test.yml
+++ b/.github/workflows/e2e-test.yml
@@ -184,7 +184,7 @@ jobs:
         submodules: 'recursive'
 
       - name: Download test report
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v5
         with:
           name: test-report-file

From 78ae6187d38d648bf661ad3ccfed2d4acc205a7f Mon Sep 17 00:00:00 2001
From: rammanoj
Date: Fri, 22 Aug 2025 14:13:56 -0400
Subject: [PATCH 30/34] Add label to nodepool (#588)

* add label to nodepool

* remove redundant prints

---------

Co-authored-by: rpotla
---
 linode_api4/objects/lke.py                      | 12 +++++-------
 test/fixtures/lke_clusters_18881_pools_456.json |  1 +
 test/fixtures/lke_clusters_18882_pools_789.json |  1 +
 test/unit/objects/lke_test.py                   |  5 +++++
 4 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/linode_api4/objects/lke.py b/linode_api4/objects/lke.py
index 7086b1113..792aed988 100644
--- a/linode_api4/objects/lke.py
+++ b/linode_api4/objects/lke.py
@@ -187,6 +187,7 @@ class LKENodePool(DerivedBase):
     properties = {
         "id": Property(identifier=True),
         "cluster_id": Property(identifier=True),
+        "label": Property(mutable=True),
         "type": Property(slug_relationship=Type),
         "disks": Property(),
         "disk_encryption": Property(),
@@ -419,6 +420,7 @@ def node_pool_create(
             Union[str, KubeVersion, TieredKubeVersion]
         ] = None,
         update_strategy: Optional[str] = None,
+        label: Optional[str] = None,
         **kwargs,
     ):
         """
@@ -444,23 +446,19 @@ def node_pool_create(
             for possible values.
 
+        :param label: The name of the node pool.
+        :type label: str
+
         :returns: The new Node Pool
         :rtype: LKENodePool
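+
+        Example (a minimal sketch; ``cluster`` is assumed to be an existing
+        :any:`LKECluster`, and the type and label values are illustrative)::
+
+            pool = cluster.node_pool_create(
+                "g6-standard-1", 3, label="example-node-pool"
+            )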
+ :type label: str :rtype: LKENodePool """ params = { "type": node_type, + "label": label, "count": node_count, "labels": labels, "taints": taints, "k8s_version": k8s_version, "update_strategy": update_strategy, } - - if labels is not None: - params["labels"] = labels - - if taints is not None: - params["taints"] = taints - params.update(kwargs) result = self._client.post( diff --git a/test/fixtures/lke_clusters_18881_pools_456.json b/test/fixtures/lke_clusters_18881_pools_456.json index f904b9c95..9aa5fb0f0 100644 --- a/test/fixtures/lke_clusters_18881_pools_456.json +++ b/test/fixtures/lke_clusters_18881_pools_456.json @@ -34,6 +34,7 @@ "foo": "bar", "bar": "foo" }, + "label": "example-node-pool", "type": "g6-standard-4", "disk_encryption": "enabled" } \ No newline at end of file diff --git a/test/fixtures/lke_clusters_18882_pools_789.json b/test/fixtures/lke_clusters_18882_pools_789.json index a7bbc4749..d3c17eedb 100644 --- a/test/fixtures/lke_clusters_18882_pools_789.json +++ b/test/fixtures/lke_clusters_18882_pools_789.json @@ -1,6 +1,7 @@ { "id": 789, "type": "g6-standard-2", + "label": "enterprise-node-pool", "count": 3, "nodes": [], "disks": [], diff --git a/test/unit/objects/lke_test.py b/test/unit/objects/lke_test.py index a0ad63288..cb9589cfb 100644 --- a/test/unit/objects/lke_test.py +++ b/test/unit/objects/lke_test.py @@ -51,6 +51,7 @@ def test_get_pool(self): assert pool.id == 456 assert pool.cluster_id == 18881 assert pool.type.id == "g6-standard-4" + assert pool.label == "example-node-pool" assert pool.disk_encryption == InstanceDiskEncryptionType.enabled assert pool.disks is not None @@ -162,6 +163,7 @@ def test_load_node_pool(self): self.assertEqual(pool.id, 456) self.assertEqual(pool.cluster_id, 18881) self.assertEqual(pool.type.id, "g6-standard-4") + self.assertEqual(pool.label, "example-node-pool") self.assertIsNotNone(pool.disks) self.assertIsNotNone(pool.nodes) self.assertIsNotNone(pool.autoscaler) @@ -251,6 +253,7 @@ def test_lke_node_pool_update(self): pool.tags = ["foobar"] pool.count = 5 + pool.label = "testing-label" pool.autoscaler = { "enabled": True, "min": 2, @@ -274,6 +277,7 @@ def test_lke_node_pool_update(self): "min": 2, "max": 10, }, + "label": "testing-label", "labels": { "updated-key": "updated-value", }, @@ -546,6 +550,7 @@ def test_cluster_enterprise(self): pool = LKENodePool(self.client, 789, 18882) assert pool.k8s_version == "1.31.1+lke1" assert pool.update_strategy == "rolling_update" + assert pool.label == "enterprise-node-pool" def test_lke_tiered_version(self): version = TieredKubeVersion(self.client, "1.32", "standard") From 0c1f2b855f204619d679c0828c77cfc5cc427e6b Mon Sep 17 00:00:00 2001 From: Erik Zilber Date: Fri, 5 Sep 2025 16:13:04 -0400 Subject: [PATCH 31/34] Updated incorrect documentation link for maintenance policies (#590) --- linode_api4/groups/maintenance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linode_api4/groups/maintenance.py b/linode_api4/groups/maintenance.py index f41780dfb..7d56cec6e 100644 --- a/linode_api4/groups/maintenance.py +++ b/linode_api4/groups/maintenance.py @@ -14,7 +14,7 @@ def maintenance_policies(self): Returns a collection of MaintenancePolicy objects representing available maintenance policies that can be applied to Linodes - API Documentation: https://techdocs.akamai.com/linode-api/reference/get-policies + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-maintenance-policies :returns: A list of Maintenance Policies that can be applied to Linodes :rtype: 
List of MaintenancePolicy objects as MappedObjects From 8b85487e49da429b5063888b7f9c29a82a4ededb Mon Sep 17 00:00:00 2001 From: Lena Garber <114949949+lgarber-akamai@users.noreply.github.com> Date: Mon, 8 Sep 2025 09:54:36 -0400 Subject: [PATCH 32/34] Resolve various integration test failures (#591) * Various test fixes to unblock upcoming release * Use dynamic label for VLAN test * oops * LA Disk Encryption -> Disk Encryption --- test/integration/filters/fixtures.py | 4 +- test/integration/models/image/test_image.py | 40 ++++++++++++++----- test/integration/models/linode/test_linode.py | 4 +- test/integration/models/lke/test_lke.py | 10 ++--- .../models/networking/test_networking.py | 2 +- .../models/nodebalancer/test_nodebalancer.py | 13 ++++-- 6 files changed, 47 insertions(+), 26 deletions(-) diff --git a/test/integration/filters/fixtures.py b/test/integration/filters/fixtures.py index 344303eee..31b7edcbf 100644 --- a/test/integration/filters/fixtures.py +++ b/test/integration/filters/fixtures.py @@ -23,9 +23,7 @@ def lke_cluster_instance(test_linode_client): node_type = test_linode_client.linode.types()[1] # g6-standard-1 version = test_linode_client.lke.versions()[0] - region = get_region( - test_linode_client, {"Kubernetes", "LA Disk Encryption"} - ) + region = get_region(test_linode_client, {"Kubernetes", "Disk Encryption"}) node_pools = test_linode_client.lke.node_pool(node_type, 3) label = get_test_label() + "_cluster" diff --git a/test/integration/models/image/test_image.py b/test/integration/models/image/test_image.py index 9124ddf97..58da0a56f 100644 --- a/test/integration/models/image/test_image.py +++ b/test/integration/models/image/test_image.py @@ -1,22 +1,46 @@ from io import BytesIO -from test.integration.conftest import get_region, get_regions +from test.integration.conftest import get_regions from test.integration.helpers import get_test_label import polling import pytest +from linode_api4 import LinodeClient from linode_api4.objects import Image +DISALLOWED_IMAGE_REGIONS = { + "gb-lon", + "au-mel", + "sg-sin-2", + "jp-tyo-3", +} + + +def get_image_upload_regions(client: LinodeClient): + """ + This is necessary because the API does not currently expose + a capability for regions that allow custom image uploads. + + In the future, we should remove this if the API exposes a custom images capability or + if all Object Storage regions support custom images. 
+ """ + + return [ + region + for region in get_regions( + client, + capabilities={"Linodes", "Object Storage"}, + site_type="core", + ) + if region.id not in DISALLOWED_IMAGE_REGIONS + ] + @pytest.fixture(scope="session") def image_upload_url(test_linode_client): label = get_test_label() + "_image" - region = get_region( - test_linode_client, - capabilities={"Linodes", "Object Storage"}, - site_type="core", - ) + region = get_image_upload_regions(test_linode_client)[0] test_linode_client.image_create_upload( label, region.id, "integration test image upload" @@ -38,9 +62,7 @@ def test_uploaded_image(test_linode_client): label = get_test_label() + "_image" - regions = get_regions( - test_linode_client, capabilities={"Object Storage"}, site_type="core" - ) + regions = get_image_upload_regions(test_linode_client) image = test_linode_client.image_upload( label, diff --git a/test/integration/models/linode/test_linode.py b/test/integration/models/linode/test_linode.py index 52d948d26..e13903e4f 100644 --- a/test/integration/models/linode/test_linode.py +++ b/test/integration/models/linode/test_linode.py @@ -180,7 +180,7 @@ def create_linode_for_long_running_tests(test_linode_client, e2e_test_firewall): def linode_with_disk_encryption(test_linode_client, request): client = test_linode_client - target_region = get_region(client, {"LA Disk Encryption"}) + target_region = get_region(client, {"Disk Encryption"}) label = get_test_label(length=8) disk_encryption = request.param @@ -235,7 +235,7 @@ def test_linode_transfer(test_linode_client, linode_with_volume_firewall): def test_linode_rebuild(test_linode_client): client = test_linode_client - region = get_region(client, {"LA Disk Encryption"}) + region = get_region(client, {"Disk Encryption"}) label = get_test_label() + "_rebuild" diff --git a/test/integration/models/lke/test_lke.py b/test/integration/models/lke/test_lke.py index 3486485d6..241117442 100644 --- a/test/integration/models/lke/test_lke.py +++ b/test/integration/models/lke/test_lke.py @@ -32,9 +32,7 @@ def lke_cluster(test_linode_client): node_type = test_linode_client.linode.types()[1] # g6-standard-1 version = test_linode_client.lke.versions()[0] - region = get_region( - test_linode_client, {"Kubernetes", "LA Disk Encryption"} - ) + region = get_region(test_linode_client, {"Kubernetes", "Disk Encryption"}) node_pools = test_linode_client.lke.node_pool(node_type, 3) label = get_test_label() + "_cluster" @@ -117,9 +115,7 @@ def lke_cluster_with_labels_and_taints(test_linode_client): def lke_cluster_with_apl(test_linode_client): version = test_linode_client.lke.versions()[0] - region = get_region( - test_linode_client, {"Kubernetes", "LA Disk Encryption"} - ) + region = get_region(test_linode_client, {"Kubernetes", "Disk Encryption"}) # NOTE: g6-dedicated-4 is the minimum APL-compatible Linode type node_pools = test_linode_client.lke.node_pool("g6-dedicated-4", 3) @@ -149,7 +145,7 @@ def lke_cluster_enterprise(test_linode_client): )[0] region = get_region( - test_linode_client, {"Kubernetes Enterprise", "LA Disk Encryption"} + test_linode_client, {"Kubernetes Enterprise", "Disk Encryption"} ) node_pools = test_linode_client.lke.node_pool( diff --git a/test/integration/models/networking/test_networking.py b/test/integration/models/networking/test_networking.py index b92cdfadc..87a0e5842 100644 --- a/test/integration/models/networking/test_networking.py +++ b/test/integration/models/networking/test_networking.py @@ -235,7 +235,7 @@ def test_create_and_delete_vlan(test_linode_client, 
linode_for_vlan_tests): config.interfaces = [] config.save() - vlan_label = "testvlan" + vlan_label = f"{get_test_label(8)}-testvlan" interface = config.interface_create_vlan( label=vlan_label, ipam_address="10.0.0.2/32" ) diff --git a/test/integration/models/nodebalancer/test_nodebalancer.py b/test/integration/models/nodebalancer/test_nodebalancer.py index df07de215..9e7537897 100644 --- a/test/integration/models/nodebalancer/test_nodebalancer.py +++ b/test/integration/models/nodebalancer/test_nodebalancer.py @@ -167,7 +167,9 @@ def test_update_nb(test_linode_client, create_nb): create_nb.id, ) - nb.label = "ThisNewLabel" + new_label = f"{nb.label}-ThisNewLabel" + + nb.label = new_label nb.client_udp_sess_throttle = 5 nb.save() @@ -176,7 +178,7 @@ def test_update_nb(test_linode_client, create_nb): create_nb.id, ) - assert "ThisNewLabel" == nb_updated.label + assert new_label == nb_updated.label assert 5 == nb_updated.client_udp_sess_throttle @@ -215,7 +217,10 @@ def test_update_nb_node(test_linode_client, create_nb_config): create_nb_config.nodebalancer_id, ) node = config.nodes[0] - node.label = "ThisNewLabel" + + new_label = f"{node.label}-ThisNewLabel" + + node.label = new_label node.weight = 50 node.mode = "accept" node.save() @@ -226,7 +231,7 @@ def test_update_nb_node(test_linode_client, create_nb_config): (create_nb_config.id, create_nb_config.nodebalancer_id), ) - assert "ThisNewLabel" == node_updated.label + assert new_label == node_updated.label assert 50 == node_updated.weight assert "accept" == node_updated.mode From 2d3027728e6e637e140db40caba545b7d9b1ca2b Mon Sep 17 00:00:00 2001 From: Lena Garber <114949949+lgarber-akamai@users.noreply.github.com> Date: Mon, 8 Sep 2025 12:43:39 -0400 Subject: [PATCH 33/34] Add no-osl-1 to image test disallow list (#593) --- test/integration/models/image/test_image.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/models/image/test_image.py b/test/integration/models/image/test_image.py index 58da0a56f..18e223ff0 100644 --- a/test/integration/models/image/test_image.py +++ b/test/integration/models/image/test_image.py @@ -13,6 +13,7 @@ "au-mel", "sg-sin-2", "jp-tyo-3", + "no-osl-1", } From 4f8ba4c7f7570b74b180671d276322685e1962d5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Sep 2025 10:49:33 +0530 Subject: [PATCH 34/34] build(deps): bump actions/setup-python from 5 to 6 (#595) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5 to 6. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v5...v6) --- updated-dependencies: - dependency-name: actions/setup-python dependency-version: '6' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 4 ++-- .github/workflows/e2e-test-pr.yml | 4 ++-- .github/workflows/e2e-test.yml | 6 +++--- .github/workflows/nightly-smoke-tests.yml | 2 +- .github/workflows/publish-pypi.yaml | 2 +- .github/workflows/release-cross-repo-test.yml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d9863f1fd..c665358d7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,7 +17,7 @@ jobs: uses: actions/checkout@v5 - name: setup python 3 - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: '3.x' @@ -34,7 +34,7 @@ jobs: python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] steps: - uses: actions/checkout@v5 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: ${{ matrix.python-version }} - name: Run tests diff --git a/.github/workflows/e2e-test-pr.yml b/.github/workflows/e2e-test-pr.yml index 2f26c393b..1e12c7475 100644 --- a/.github/workflows/e2e-test-pr.yml +++ b/.github/workflows/e2e-test-pr.yml @@ -80,7 +80,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Setup Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: '3.x' @@ -176,7 +176,7 @@ jobs: steps: - name: Set up Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: '3.x' diff --git a/.github/workflows/e2e-test.yml b/.github/workflows/e2e-test.yml index 7a94a1e24..282914ebc 100644 --- a/.github/workflows/e2e-test.yml +++ b/.github/workflows/e2e-test.yml @@ -71,7 +71,7 @@ jobs: submodules: 'recursive' - name: Setup Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ inputs.run-eol-python-version == 'true' && env.EOL_PYTHON_VERSION || inputs.python-version || env.DEFAULT_PYTHON_VERSION }} @@ -141,7 +141,7 @@ jobs: steps: - name: Set up Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: '3.x' @@ -189,7 +189,7 @@ jobs: name: test-report-file - name: Set up Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: '3.x' diff --git a/.github/workflows/nightly-smoke-tests.yml b/.github/workflows/nightly-smoke-tests.yml index 2d7f9543a..d905a1265 100644 --- a/.github/workflows/nightly-smoke-tests.yml +++ b/.github/workflows/nightly-smoke-tests.yml @@ -24,7 +24,7 @@ jobs: ref: dev - name: Set up Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: '3.x' diff --git a/.github/workflows/publish-pypi.yaml b/.github/workflows/publish-pypi.yaml index a921010ca..85f142bc6 100644 --- a/.github/workflows/publish-pypi.yaml +++ b/.github/workflows/publish-pypi.yaml @@ -15,7 +15,7 @@ jobs: uses: actions/checkout@v5 - name: Setup Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: '3.x' diff --git a/.github/workflows/release-cross-repo-test.yml b/.github/workflows/release-cross-repo-test.yml index e6ca88cd3..62f4bea47 100644 --- a/.github/workflows/release-cross-repo-test.yml +++ b/.github/workflows/release-cross-repo-test.yml @@ -25,7 +25,7 @@ jobs: run: sudo apt-get install -y build-essential - name: Set up Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: '3.10'
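
Usage sketch for the node pool `label` field added in PATCH 30 above. This is a
minimal sketch, not a verified snippet: it assumes an authenticated
LinodeClient, an existing cluster (ID 18881 is borrowed from the unit-test
fixtures), and that `LKECluster.node_pool_create` accepts the keyword argument
exactly as the diff declares it.

    from linode_api4 import LinodeClient, LKECluster

    client = LinodeClient("my-api-token")  # assumed token
    cluster = LKECluster(client, 18881)    # cluster ID borrowed from the fixtures above

    # Pass the new optional label at pool creation time.
    pool = cluster.node_pool_create("g6-standard-1", 3, label="example-node-pool")

    # The property is declared mutable in the diff, so it can also be
    # renamed in place and persisted with save().
    pool.label = "renamed-node-pool"
    pool.save()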
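The image tests in PATCH 32 select upload regions through the new
`get_image_upload_regions` helper instead of a single `get_region` call. Below
is a sketch of the same pattern outside the test suite, assuming the helper (or
an equivalent copy of it) is in scope and that `image_upload` takes a label,
region ID, and file-like object, as the existing test does.

    from io import BytesIO

    from linode_api4 import LinodeClient

    client = LinodeClient("my-api-token")  # assumed token

    # get_image_upload_regions() filters core regions with the "Linodes" and
    # "Object Storage" capabilities against DISALLOWED_IMAGE_REGIONS, since
    # the API does not yet expose a capability for custom image uploads.
    region = get_image_upload_regions(client)[0]

    image = client.image_upload(
        "example-image",        # hypothetical label
        region.id,
        BytesIO(b"\0" * 1024),  # placeholder payload
    )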
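PATCH 31 only corrects a documentation URL, but for context, the docstring it
touches belongs to the call that lists available maintenance policies. A
sketch follows, with the caveat that the `client.maintenance` attribute name
is an assumption inferred from the `linode_api4/groups/maintenance.py` module
path rather than confirmed by the diff.

    from linode_api4 import LinodeClient

    client = LinodeClient("my-api-token")  # assumed token

    # Assumed group attribute; per the docstring, this returns the available
    # MaintenancePolicy entries as MappedObjects.
    for policy in client.maintenance.maintenance_policies():
        print(policy.dict)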