3 changes: 0 additions & 3 deletions clean.sh
@@ -10,9 +10,6 @@ rm open-tofu/*.generated.tf
rm -r layer/create_layer_virtualenv
rm -r layer/python311_layer_content
rm -r layer/mpic_coordinator_layer_content
rm -r layer/mpic_caa_checker_layer_content
rm -r layer/mpic_dcv_checker_layer_content
rm -r layer/mpic_common_layer_content

rm layer/*.zip

2 changes: 2 additions & 0 deletions config.example.yaml
@@ -22,3 +22,5 @@ caa-domains:
# absolute maximum regardless of orchestration parameters in request
# NOT to be interpreted as the default number of attempts (default is 1 unless explicitly set in request)
absolute-max-attempts: 3

log-level: INFO
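
The new key is optional; when omitted, configure.py leaves the corresponding Terraform placeholder empty (see the configure.py changes below). A minimal pre-flight check one could add is sketched here — the accepted set of names is an assumption: the standard logging level names plus TRACE, which open-mpic-core's trace_level_logger appears to provide.

# Hedged sketch, not part of the PR: validate the optional log-level value
# before it is baked into the Terraform templates.
ACCEPTED_LOG_LEVELS = {"TRACE", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"}

def validate_log_level(value: str) -> str:
    level = value.strip().upper()
    if level not in ACCEPTED_LOG_LEVELS:
        raise ValueError(f"Unsupported log-level: {value!r}")
    return level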
12 changes: 12 additions & 0 deletions configure.py
@@ -109,6 +109,12 @@ def main(raw_args=None):
# Set the source path for the lambda functions.
main_tf_string = main_tf_string.replace("{{source-path}}", f"{config['source-path']}")

# Set log level if present.
if "log-level" in config:
main_tf_string = main_tf_string.replace("{{log-level-with-key}}", f"log_level = \"{config['log-level']}\"")
else:
main_tf_string = main_tf_string.replace("{{log-level-with-key}}", "")

# Derive the out file from the input file name.
if not args.main_tf_template.endswith(".tf.template"):
print(f"Error: invalid tf template name: {args.main_tf_template}. Make sure all tf template files end in '.tf.template'.")
@@ -148,6 +154,12 @@ def main(raw_args=None):
# Set the source path for the lambda functions.
aws_perspective_tf_region = aws_perspective_tf_region.replace("{{source-path}}", f"{config['source-path']}")

# Set log level if present.
if "log-level" in config:
aws_perspective_tf_region = aws_perspective_tf_region.replace("{{log-level-with-key}}", f"log_level = \"{config['log-level']}\"")
else:
aws_perspective_tf_region = aws_perspective_tf_region.replace("{{log-level-with-key}}", "")

if not args.aws_perspective_tf_template.endswith(".tf.template"):
print(f"Error: invalid tf template name: {args.aws_perspective_tf_template}. Make sure all tf template files end in '.tf.template'.")
exit()
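
For context, a minimal sketch of what the substitution above produces when run against a toy template string (the real inputs are the open-tofu/*.tf.template files, and the config dict stands in for the parsed config.yaml):

# Hedged sketch of the {{log-level-with-key}} substitution, not part of the PR.
template = 'environment {\n  variables = {\n    {{log-level-with-key}}\n  }\n}\n'
config = {"log-level": "INFO"}  # stands in for the parsed YAML config

if "log-level" in config:
    rendered = template.replace("{{log-level-with-key}}", f"log_level = \"{config['log-level']}\"")
else:
    rendered = template.replace("{{log-level-with-key}}", "")

print(rendered)
# environment {
#   variables = {
#     log_level = "INFO"
#   }
# }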
6 changes: 6 additions & 0 deletions open-tofu/aws-perspective.tf.template
@@ -218,6 +218,11 @@ resource "aws_lambda_function" "mpic_dcv_checker_lambda_{{region}}" {
security_group_ids = [for s in aws_default_security_group.default_security_group_{{region}} : s.id]
}
provider = aws.{{region}}
environment {
variables = {
{{log-level-with-key}}
}
}
}

resource "aws_lambda_function" "mpic_caa_checker_lambda_{{region}}" {
@@ -246,6 +251,7 @@ resource "aws_lambda_function" "mpic_caa_checker_lambda_{{region}}" {
environment {
variables = {
default_caa_domains = {{default-caa-domains}}
{{log-level-with-key}}
}
}
}
1 change: 1 addition & 0 deletions open-tofu/main.tf.template
@@ -84,6 +84,7 @@ resource "aws_lambda_function" "mpic_coordinator_lambda" {
default_perspective_count = {{default-perspective-count}}
hash_secret = {{hash-secret}}
{{absolute-max-attempts-with-key}}
{{log-level-with-key}}
}
}
}
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -26,14 +26,14 @@ classifiers = [
"Programming Language :: Python :: Implementation :: PyPy",
]
dependencies = [
# "open-mpic-core @ git+https://github.com/open-mpic/open-mpic-core-python.git@main",
#"open-mpic-core @ git+https://github.com/open-mpic/open-mpic-core-python.git@ds-trace-logging",
"pyyaml==6.0.1",
"requests>=2.32.3",
"dnspython==2.6.1",
"pydantic==2.8.2",
"aiohttp==3.11.11",
"aws-lambda-powertools[parser]==3.2.0",
"open-mpic-core==4.4.0",
"open-mpic-core==4.6.1",
"aioboto3~=13.3.0",
]

2 changes: 1 addition & 1 deletion src/aws_lambda_mpic/__about__.py
@@ -1 +1 @@
__version__ = "0.3.2"
__version__ = "0.4.0"
src/aws_lambda_mpic/mpic_caa_checker_lambda/mpic_caa_checker_lambda_function.py
@@ -1,17 +1,28 @@
import os
import asyncio

from aws_lambda_powertools.utilities.parser import event_parser

from open_mpic_core.common_domain.check_request import CaaCheckRequest
from open_mpic_core.mpic_caa_checker.mpic_caa_checker import MpicCaaChecker
import os
from open_mpic_core.common_util.trace_level_logger import get_logger

logger = get_logger(__name__)


class MpicCaaCheckerLambdaHandler:
def __init__(self):
self.perspective_code = os.environ['AWS_REGION']
self.default_caa_domain_list = os.environ['default_caa_domains'].split("|")
self.caa_checker = MpicCaaChecker(self.default_caa_domain_list, self.perspective_code)
self.log_level = os.environ['log_level'] if 'log_level' in os.environ else None

self.logger = logger.getChild(self.__class__.__name__)
if self.log_level:
self.logger.setLevel(self.log_level)

self.caa_checker = MpicCaaChecker(default_caa_domain_list=self.default_caa_domain_list,
perspective_code=self.perspective_code,
log_level=self.logger.level)

def process_invocation(self, caa_request: CaaCheckRequest):
try:
src/aws_lambda_mpic/mpic_coordinator_lambda/mpic_coordinator_lambda_function.py
@@ -1,15 +1,18 @@
import logging
import os
import json
import traceback

import yaml
import asyncio
import aioboto3

from asyncio import Queue
from collections import defaultdict
from importlib import resources

from aws_lambda_powertools.utilities.parser import event_parser, envelopes
from pydantic import TypeAdapter, ValidationError, BaseModel
from aws_lambda_powertools.utilities.parser import event_parser, envelopes

from open_mpic_core.common_domain.check_request import BaseCheckRequest
from open_mpic_core.common_domain.check_response import CheckResponse
from open_mpic_core.mpic_coordinator.domain.mpic_request import MpicRequest
@@ -18,6 +21,9 @@
from open_mpic_core.mpic_coordinator.mpic_coordinator import MpicCoordinator, MpicCoordinatorConfiguration
from open_mpic_core.common_domain.enum.check_type import CheckType
from open_mpic_core.mpic_coordinator.domain.remote_perspective import RemotePerspective
from open_mpic_core.common_util.trace_level_logger import get_logger

logger = get_logger(__name__)


class PerspectiveEndpointInfo(BaseModel):
@@ -37,6 +43,11 @@ def __init__(self):
self.default_perspective_count = int(os.environ['default_perspective_count'])
self.global_max_attempts = int(os.environ['absolute_max_attempts']) if 'absolute_max_attempts' in os.environ else None
self.hash_secret = os.environ['hash_secret']
self.log_level = os.getenv('log_level', None)

self.logger = logger.getChild(self.__class__.__name__)
if self.log_level:
self.logger.setLevel(self.log_level)

self.remotes_per_perspective_per_check_type = {
CheckType.DCV: {perspective_code: perspective_config.dcv_endpoint_info for perspective_code, perspective_config in perspectives.items()},
@@ -54,10 +65,7 @@ def __init__(self):
self.hash_secret
)

self.mpic_coordinator = MpicCoordinator(
self.call_remote_perspective,
self.mpic_coordinator_configuration
)
self.mpic_coordinator = MpicCoordinator(self.call_remote_perspective, self.mpic_coordinator_configuration, self.logger.level)

# for correct deserialization of responses based on discriminator field (check type)
self.mpic_request_adapter = TypeAdapter(MpicRequest)
@@ -125,7 +133,7 @@ async def call_remote_perspective(self, perspective: RemotePerspective, check_ty
response_payload = json.loads(await response['Payload'].read())
return self.check_response_adapter.validate_json(response_payload['body'])
except ValidationError as ve:
# We might want to handle this differently later.
self.logger.log(level=logging.ERROR, msg=f"Validation error in response from {perspective.code}: {ve}")
raise ve
finally:
await self.release_lambda_client(perspective.code, client)
@@ -184,6 +192,9 @@ def wrapper(*args, **kwargs):
except ValidationError as validation_error:
return build_400_response(MpicRequestValidationMessages.REQUEST_VALIDATION_FAILED.key, validation_error.errors())
except Exception as e:
logger.error(f"An error occurred: {str(e)}")
print(traceback.format_exc())
print(f"BOY HOWDY error occurred: {str(e)}")
return {
'statusCode': 500,
'headers': {'Content-Type': 'application/json'},
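
The same log-level wiring appears in the coordinator and both checker handlers; a condensed sketch of the shared pattern (HandlerSketch is illustrative and not a class in the PR):

# Hedged sketch of the shared pattern, not part of the PR.
import os

from open_mpic_core.common_util.trace_level_logger import get_logger

logger = get_logger(__name__)  # module-level logger, as in the handlers above

class HandlerSketch:
    def __init__(self):
        # os.getenv returns None when the variable is absent, so the logger
        # keeps its inherited level unless log_level is explicitly configured.
        self.log_level = os.getenv('log_level')
        self.logger = logger.getChild(self.__class__.__name__)
        if self.log_level:
            self.logger.setLevel(self.log_level)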
src/aws_lambda_mpic/mpic_dcv_checker_lambda/mpic_dcv_checker_lambda_function.py
@@ -1,19 +1,27 @@
import os
import asyncio

from aws_lambda_powertools.utilities.parser import event_parser

from open_mpic_core.common_domain.check_request import DcvCheckRequest
from open_mpic_core.mpic_dcv_checker.mpic_dcv_checker import MpicDcvChecker
import os
from open_mpic_core.common_util.trace_level_logger import get_logger

logger = get_logger(__name__)


class MpicDcvCheckerLambdaHandler:
def __init__(self):
self.perspective_code = os.environ['AWS_REGION']
self.dcv_checker = MpicDcvChecker(self.perspective_code)
self.log_level = os.environ['log_level'] if 'log_level' in os.environ else None

self.logger = logger.getChild(self.__class__.__name__)
if self.log_level:
self.logger.setLevel(self.log_level)

async def initialize(self):
await self.dcv_checker.initialize()
self.dcv_checker = MpicDcvChecker(perspective_code=self.perspective_code,
reuse_http_client=False,
log_level=self.logger.level)

def process_invocation(self, dcv_request: DcvCheckRequest):
try:
@@ -23,6 +31,9 @@ def process_invocation(self, dcv_request: DcvCheckRequest):
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)

self.logger.debug("(debug log) Processing DCV check request: %s", dcv_request)
print("(print) Processing DCV check request: %s", dcv_request)

dcv_response = event_loop.run_until_complete(self.dcv_checker.check_dcv(dcv_request))
status_code = 200
if dcv_response.errors is not None and len(dcv_response.errors) > 0:
@@ -42,26 +53,13 @@ def process_invocation(self, dcv_request: DcvCheckRequest):
_handler = None


async def initialize_handler() -> MpicDcvCheckerLambdaHandler:
handler = MpicDcvCheckerLambdaHandler()
await handler.initialize()
return handler


def get_handler() -> MpicDcvCheckerLambdaHandler:
"""
Singleton pattern to avoid recreating the handler on every Lambda invocation
"""
global _handler
if _handler is None:
try:
event_loop = asyncio.get_running_loop()
except RuntimeError:
# No running event loop, create a new one
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)

_handler = event_loop.run_until_complete(initialize_handler())
_handler = MpicDcvCheckerLambdaHandler()
return _handler


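
With the async initialize step removed, the singleton can be fetched synchronously. A hedged usage sketch (this lambda_handler is illustrative; the real entrypoint in the module parses the raw event into a DcvCheckRequest via event_parser before calling process_invocation):

# Hedged sketch, not part of the PR: the handler is built once per Lambda
# execution environment and reused across warm invocations.
def lambda_handler(event: DcvCheckRequest, context):
    return get_handler().process_invocation(event)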
6 changes: 3 additions & 3 deletions tests/integration/test_deployed_mpic_api.py
@@ -1,6 +1,8 @@
import json
import sys
import pytest
from open_mpic_core.common_domain.enum.dcv_validation_method import DcvValidationMethod

from pydantic import TypeAdapter

from open_mpic_core.common_domain.check_parameters import CaaCheckParameters, DcvWebsiteChangeValidationDetails, DcvAcmeDns01ValidationDetails, DcvDnsChangeValidationDetails
@@ -206,11 +208,9 @@ def api_should_return_200_is_valid_true_given_valid_dns_change_validation(self,

print("\nRequest:\n", json.dumps(request.model_dump(), indent=4)) # pretty print request body
response = api_client.post(MPIC_REQUEST_PATH, json.dumps(request.model_dump()))
assert response.status_code == 200
print("\nResponse:\n", json.dumps(json.loads(response.text), indent=4)) # pretty print request body

assert response.status_code == 200
mpic_response = self.mpic_response_adapter.validate_json(response.text)

assert mpic_response.is_valid is True

def api_should_return_200_and_failed_corroboration_given_failed_dcv_check(self, api_client):
21 changes: 21 additions & 0 deletions tests/unit/aws_lambda_mpic/conftest.py
@@ -0,0 +1,21 @@
# conftest.py
import logging
from io import StringIO
import pytest


@pytest.fixture(autouse=True)
def setup_logging():
# Clear existing handlers
root = logging.getLogger()
for handler in root.handlers[:]:
root.removeHandler(handler)

log_output = StringIO() # to be able to inspect what gets logged
handler = logging.StreamHandler(log_output)
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))

# Configure fresh logging
logging.basicConfig(handlers=[handler])

yield log_output
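
Because the fixture yields the StringIO buffer, a test can also request setup_logging explicitly and assert on the captured output, as the new CAA checker test below does. A hedged minimal usage sketch:

# Hedged sketch, not part of the PR: inspect what was logged through the
# root handler installed by the fixture.
import logging

def test_logs_something(setup_logging):
    logging.getLogger('example').warning('hello')
    assert 'hello' in setup_logging.getvalue()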
21 changes: 20 additions & 1 deletion tests/unit/aws_lambda_mpic/test_caa_checker_lambda.py
@@ -1,19 +1,23 @@
import time

import dns
import pytest

import aws_lambda_mpic.mpic_caa_checker_lambda.mpic_caa_checker_lambda_function as mpic_caa_checker_lambda_function
from open_mpic_core.common_domain.check_response import CaaCheckResponse, CaaCheckResponseDetails
from open_mpic_core_test.test_util.mock_dns_object_creator import MockDnsObjectCreator
from open_mpic_core_test.test_util.valid_check_creator import ValidCheckCreator


# noinspection PyMethodMayBeStatic
class TestCaaCheckerLambda:
@staticmethod
@pytest.fixture(scope='class')
def set_env_variables():
envvars = {
'AWS_REGION': 'us-east-1',
'default_caa_domains': 'ca1.com|ca2.org|ca3.net'
'default_caa_domains': 'ca1.com|ca2.org|ca3.net',
'log_level': 'TRACE'
}
with pytest.MonkeyPatch.context() as class_scoped_monkeypatch:
for k, v in envvars.items():
@@ -33,6 +37,21 @@ def lambda_handler__should_do_caa_check_using_configured_caa_checker(self, set_e
result = mpic_caa_checker_lambda_function.lambda_handler(caa_check_request, None)
assert result == mock_return_value

def lambda_handler__should_set_log_level_of_caa_checker(self, set_env_variables, setup_logging, mocker):
caa_check_request = ValidCheckCreator.create_valid_caa_check_request()

records = [MockDnsObjectCreator.create_caa_record(0, 'issue', 'ca1.org')]
mock_rrset = MockDnsObjectCreator.create_rrset(dns.rdatatype.CAA, *records)
mock_domain = dns.name.from_text(caa_check_request.domain_or_ip_target)
mock_return = (mock_rrset, mock_domain)
mocker.patch('open_mpic_core.mpic_caa_checker.mpic_caa_checker.MpicCaaChecker.find_caa_records_and_domain',
return_value=mock_return)

result = mpic_caa_checker_lambda_function.lambda_handler(caa_check_request, None)
assert result['statusCode'] == 200
log_contents = setup_logging.getvalue()
assert all(text in log_contents for text in ['MpicCaaChecker', 'TRACE']) # Verify the log level was set

@staticmethod
def create_caa_check_response():
return CaaCheckResponse(perspective_code='us-east-1', check_passed=True,