diff --git a/.gitignore b/.gitignore
index 3fff196..43e62dd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -65,4 +65,8 @@ terraform.rc
 .vscode/
 
 # Ignore MegaLinter reports
-megalinter-reports/
\ No newline at end of file
+megalinter-reports/
+
+# Ignore locally written data
+output/
+debug.log
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 6b980a7..96713d8 100644
--- a/Makefile
+++ b/Makefile
@@ -12,6 +12,9 @@ clean: ## Clean the temporary files.
	rm -rf .pytest_cache
	rm -rf tests/__pycache__
	rm -rf .coverage
+	rm -rf megalinter-reports/
+	rm -rf output
+	rm -rf debug.log
 
 .PHONY: black-check
 black-check: ## Run black for code formatting, without fixing.
diff --git a/README.md b/README.md
index fbfd889..6547e3c 100644
--- a/README.md
+++ b/README.md
@@ -14,16 +14,6 @@ The Copilot dashboard can be found on the Copilot tab within the Digital Landsca
 - [Table of Contents](#table-of-contents)
 - [Prerequisites](#prerequisites)
 - [Makefile](#makefile)
-  - [Documentation](#documentation)
-  - [Testing](#testing)
-  - [Linting](#linting)
-    - [Python](#python)
-    - [Markdown](#markdown)
-      - [Markdown Configuration](#markdown-configuration)
-      - [Markdown GitHub Action](#markdown-github-action)
-    - [Megalinter](#megalinter)
-      - [Megalinter Configuration](#megalinter-configuration)
-      - [Megalinter GitHub Action](#megalinter-github-action)
 - [AWS Lambda Scripts](#aws-lambda-scripts)
 - [Setup - Running in a container](#setup---running-in-a-container)
 - [Setup - running outside of a Container (Development only)](#setup---running-outside-of-a-container-development-only)
@@ -40,6 +30,16 @@ The Copilot dashboard can be found on the Copilot tab within the Digital Landsca
   - [Allowlisting your IP](#allowlisting-your-ip)
   - [Setting up a pipeline](#setting-up-a-pipeline)
   - [Triggering a pipeline](#triggering-a-pipeline)
+  - [Documentation](#documentation)
+  - [Testing](#testing)
+  - [Linting](#linting)
+    - [Python](#python)
+    - [Markdown](#markdown)
+      - [Markdown Configuration](#markdown-configuration)
+      - [Markdown GitHub Action](#markdown-github-action)
+    - [Megalinter](#megalinter)
+      - [Megalinter Configuration](#megalinter-configuration)
+      - [Megalinter GitHub Action](#megalinter-github-action)
 
 ## Prerequisites
 
@@ -62,110 +62,6 @@ This repository has a Makefile for executing common commands. To view all comman
 make all
 ```
 
-## Documentation
-
-This project uses MkDocs for documentation which gets deployed to GitHub Pages at a repository level.
-
-For more information about MkDocs, see the below documentation.
-
-[Getting Started with MkDocs](https://www.mkdocs.org/getting-started/)
-
-There is a guide to getting started on this repository's GitHub Pages site.
-
-## Testing
-
-This project uses Pytest for testing. The tests can be found in the `tests` folder.
-
-To run all tests, use `make test`.
-
-On pull request or push to the `main` branch, the tests will automatically run. The workflow will fail if any tests fail, or if test coverage is below 95%.
-
-The related workflow can be found in `.github/workflows/ci.yml`.
-
-## Linting
-
-### Python
-
-This project uses Black, Ruff, and Pylint for linting and code formatting on Python files in `src`. Configurations for each are located in `pyproject.toml`.
-
-The following Makefile commands can be used to run linting and optionally apply fixes or run a specific linter:
-
-```bash
-black-check ## Run black for code formatting, without fixing.
-
-black-apply ## Run black and fix code formatting.
-
-ruff-check ## Run ruff for linting and code formatting, without fixing.
-
-ruff-apply ## Run ruff and fix linting and code formatting.
-
-pylint ## Run pylint for code analysis.
-
-lint ## Run Python linters without fixing.
-
-lint-apply ## Run black and ruff with auto-fix, and Pylint.
-```
-
-On pull request or push to the `main` branch, `make lint-check` will automatically run to check code quality, failing if there are any issues. It is up to the developer to apply fixes.
-
-The related workflow can be found in `.github/workflows/ci.yml`.
-
-### Markdown
-
-Markdown linting runs in a docker image, so docker must be running before attempting to lint.
-
-To lint all markdown files, run the following command:
-
-```bash
-make md-check
-```
-
-To fix all markdown files, run the following command:
-
-```bash
-make md-apply
-```
-
-#### Markdown Configuration
-
-The `.markdownlint.json` file in the root of the repository contains the configuration for markdownlint. This file is used to set the rules and settings for linting markdown files.
-
-Currently, MD013 (line length) is disabled. This is because the default line length of 80 characters is too restrictive.
-
-For a full list of rules, see [Markdownlint Rules / Aliases](https://github.com/DavidAnson/markdownlint?tab=readme-ov-file#rules--aliases)
-
-The `.markdownlintignore` file in the root of the repository is also used to prevent markdownlint running on unnecessary files such as `venv`.
-
-#### Markdown GitHub Action
-
-On pull request or push to the `main` branch, `make md-check` will automatically run to check for linting errors, failing if there are any issues. It is up to the developer to apply fixes.
-
-The related workflow can be found in `.github/workflows/ci.yml`.
-
-### Megalinter
-
-In addition to Python and Markdown-specific linting, this project uses Megalinter to catch all other types of linting errors across the project.
-
-Megalinter runs in a docker image, so docker must be running before attempting to lint.
-
-To lint with Megalinter, run:
-
-```bash
-make megalint
-```
-
-After running, Megalinter will create a folder named `megalinter-reports` containing detailed logs on linting.
-
-#### Megalinter Configuration
-
-The configuration file for Megalinter can be found in the root of the repository, named `.mega-linter.yml`.
-
-#### Megalinter GitHub Action
-
-On pull request or push to the `main` branch, Megalinter will automatically run to check project-wide linting, failing if there are any issues.
-
-The related workflow can be found in `.github/workflows/megalinter.yml`.
-
 ## AWS Lambda Scripts
 
 This script:
@@ -485,3 +381,107 @@ Once the pipeline has been set, you can manually trigger a build on the Concours
 ```bash
 fly -t aws-sdp trigger-job -j github-copilot-usage-lambda-/build-and-push
 ```
+
+## Documentation
+
+This project uses MkDocs for documentation, which is deployed to GitHub Pages at a repository level.
+
+For more information about MkDocs, see the documentation below.
+
+[Getting Started with MkDocs](https://www.mkdocs.org/getting-started/)
+
+There is a guide to getting started on this repository's GitHub Pages site.
+
+## Testing
+
+This project uses Pytest for testing. The tests can be found in the `tests` folder.
+
+To run all tests, use `make test`.
+
+On pull request or push to the `main` branch, the tests will automatically run. The workflow will fail if any tests fail, or if test coverage is below 95%.
+
+The related workflow can be found in `.github/workflows/ci.yml`.
+
+## Linting
+
+### Python
+
+This project uses Black, Ruff, and Pylint for linting and code formatting on Python files in `src`. Configurations for each are located in `pyproject.toml`.
+
+The following Makefile commands can be used to run linting and optionally apply fixes or run a specific linter:
+
+```bash
+black-check ## Run black for code formatting, without fixing.
+
+black-apply ## Run black and fix code formatting.
+
+ruff-check ## Run ruff for linting and code formatting, without fixing.
+
+ruff-apply ## Run ruff and fix linting and code formatting.
+
+pylint ## Run pylint for code analysis.
+
+lint ## Run Python linters without fixing.
+
+lint-apply ## Run black and ruff with auto-fix, and Pylint.
+```
+
+On pull request or push to the `main` branch, `make lint` will automatically run to check code quality, failing if there are any issues. It is up to the developer to apply fixes.
+
+The related workflow can be found in `.github/workflows/ci.yml`.
+
+### Markdown
+
+Markdown linting runs in a Docker image, so Docker must be running before attempting to lint.
+
+To lint all markdown files, run the following command:
+
+```bash
+make md-check
+```
+
+To fix all markdown files, run the following command:
+
+```bash
+make md-apply
+```
+
+#### Markdown Configuration
+
+The `.markdownlint.json` file in the root of the repository contains the configuration for markdownlint. This file is used to set the rules and settings for linting markdown files.
+
+Currently, MD013 (line length) is disabled. This is because the default line length of 80 characters is too restrictive.
+
+For a full list of rules, see [Markdownlint Rules / Aliases](https://github.com/DavidAnson/markdownlint?tab=readme-ov-file#rules--aliases)
+
+The `.markdownlintignore` file in the root of the repository is also used to prevent markdownlint from running on unnecessary files such as `venv`.
+
+#### Markdown GitHub Action
+
+On pull request or push to the `main` branch, `make md-check` will automatically run to check for linting errors, failing if there are any issues. It is up to the developer to apply fixes.
+
+The related workflow can be found in `.github/workflows/ci.yml`.
+
+### Megalinter
+
+In addition to Python and Markdown-specific linting, this project uses Megalinter to catch all other types of linting errors across the project.
+
+Megalinter runs in a Docker image, so Docker must be running before attempting to lint.
+
+To lint with Megalinter, run:
+
+```bash
+make megalint
+```
+
+After running, Megalinter will create a folder named `megalinter-reports` containing detailed linting logs.
+
+#### Megalinter Configuration
+
+The configuration file for Megalinter can be found in the root of the repository, named `.mega-linter.yml`.
+
+#### Megalinter GitHub Action
+
+On pull request or push to the `main` branch, Megalinter will automatically run to check project-wide linting, failing if there are any issues.
+
+The related workflow can be found in `.github/workflows/megalinter.yml`.
diff --git a/config/config.json b/config/config.json
new file mode 100644
index 0000000..b01f8a1
--- /dev/null
+++ b/config/config.json
@@ -0,0 +1,6 @@
+{
+    "features": {
+        "show_log_locally": false,
+        "write_data_locally": false
+    }
+}
diff --git a/docs/configuration.md b/docs/configuration.md
new file mode 100644
index 0000000..f3ca326
--- /dev/null
+++ b/docs/configuration.md
@@ -0,0 +1,64 @@
+# Configuration
+
+The Copilot lambda uses a local configuration file to manage its settings, located at `./config/config.json`.
+
+## `config.json`
+
+The `config.json` file contains the following:
+
+```json
+{
+    "features": {
+        "show_log_locally": false,
+        "write_data_locally": false
+    }
+}
+```
+
+### `features` Section
+
+This section contains feature flags that control which of the tool's features are enabled or disabled.
+
+#### `show_log_locally`
+
+If set to `true`, the tool will write its logs to a local `debug.log` file.
+
+When deploying to AWS, this should be set to `false` to avoid files being written to the local filesystem.
+
+#### `write_data_locally`
+
+If set to `true`, the tool will skip writing to the appropriate AWS S3 bucket and instead write the data for Copilot teams, historic usage, and teams history to the local `output/` directory.
+
+**When deploying to AWS, this must be set to `false` to ensure the tool writes to AWS.**
+
+When debugging locally, you can set this to `true` to inspect the generated data on disk without affecting the S3 buckets used by the cloud deployment.
+
+### Example During Local Testing
+
+When testing locally, you might set the `config.json` file as follows:
+
+```json
+{
+    "features": {
+        "show_log_locally": true,
+        "write_data_locally": true
+    }
+}
+```
+
+### Example On AWS
+
+When deploying to AWS, the `config.json` file should be set as follows:
+
+```json
+{
+    "features": {
+        "show_log_locally": false,
+        "write_data_locally": false
+    }
+}
+```
+
+This configuration ensures that the tool does not log or write data locally.
+
+**It is essential that `write_data_locally` is set to `false` when deploying to AWS.**
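+
+### Reading the Flags in Code
+
+For reference, the lambda reads these flags at the start of its handler. A minimal sketch using the `get_config_file` and `get_dict_value` helpers from `src/main.py` (both introduced in this change):
+
+```python
+from src.main import get_config_file, get_dict_value
+
+# Load the configuration and pull out the feature flags
+config = get_config_file("./config/config.json")
+features = get_dict_value(config, "features")
+
+show_log_locally = get_dict_value(features, "show_log_locally")
+write_data_locally = get_dict_value(features, "write_data_locally")
+```
+
+A missing key raises a `ValueError`, so a misconfigured file fails fast rather than silently defaulting.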
diff --git a/src/main.py b/src/main.py
index d1f4ea1..d75891a 100644
--- a/src/main.py
+++ b/src/main.py
@@ -8,7 +8,7 @@
 import json
 import logging
 import os
-from typing import Optional
+from typing import Any, Optional
 
 import boto3
 import github_api_toolkit
@@ -33,6 +33,8 @@
 
 logger = logging.getLogger()
 
+logger.setLevel(logging.INFO)
+
 # Example Log Output:
 #
 # Standard output:
@@ -92,6 +94,7 @@ def get_copilot_team_date(gh: github_api_toolkit.github_interface, page: int) ->
 
             logger.info(
                 "Team %s has Copilot data",
+                team_name,
                 extra={
                     "team_name": team_name,
                     "team_slug": team_slug,
@@ -113,13 +116,14 @@ def get_copilot_team_date(gh: github_api_toolkit.github_interface, page: int) ->
 
 def get_and_update_historic_usage(
-    s3: boto3.client, gh: github_api_toolkit.github_interface
+    s3: boto3.client, gh: github_api_toolkit.github_interface, write_data_locally: bool
 ) -> tuple:
     """Get and update historic usage data from GitHub Copilot.
 
     Args:
         s3 (boto3.client): An S3 client.
         gh (github_api_toolkit.github_interface): An instance of the github_interface class.
+        write_data_locally (bool): Whether to write data locally instead of to an S3 bucket.
 
     Returns:
         tuple: A tuple containing the updated historic usage data and a list of dates added.
@@ -153,18 +157,28 @@ def get_and_update_historic_usage(
         extra={"no_days_added": len(dates_added), "dates_added": dates_added},
     )
 
-    # Write the updated historic_usage to historic_usage_data.json
-    update_s3_object(s3, BUCKET_NAME, OBJECT_NAME, historic_usage)
+    if not write_data_locally:
+        # Write the updated historic_usage to historic_usage_data.json
+        update_s3_object(s3, BUCKET_NAME, OBJECT_NAME, historic_usage)
+    else:
+        local_path = f"output/{OBJECT_NAME}"
+        os.makedirs("output", exist_ok=True)
+        with open(local_path, "w", encoding="utf-8") as f:
+            json.dump(historic_usage, f, indent=4)
+        logger.info("Historic usage data written locally to %s (S3 skipped)", local_path)
 
     return historic_usage, dates_added
 
 
-def get_and_update_copilot_teams(s3: boto3.client, gh: github_api_toolkit.github_interface) -> list:
+def get_and_update_copilot_teams(
+    s3: boto3.client, gh: github_api_toolkit.github_interface, write_data_locally: bool
+) -> list:
     """Get and update GitHub Teams with Copilot Data.
 
     Args:
         s3 (boto3.client): An S3 client.
         gh (github_api_toolkit.github_interface): An instance of the github_interface class.
+        write_data_locally (bool): Whether to write data locally instead of to an S3 bucket.
 
     Returns:
         list: A list of GitHub Teams with Copilot Data.
@@ -191,7 +205,14 @@ def get_and_update_copilot_teams(s3: boto3.client, gh: github_api_toolkit.github
         extra={"no_teams": len(copilot_teams)},
     )
 
-    update_s3_object(s3, BUCKET_NAME, "copilot_teams.json", copilot_teams)
+    if not write_data_locally:
+        update_s3_object(s3, BUCKET_NAME, "copilot_teams.json", copilot_teams)
+    else:
+        local_path = "output/copilot_teams.json"
+        os.makedirs("output", exist_ok=True)
+        with open(local_path, "w", encoding="utf-8") as f:
+            json.dump(copilot_teams, f, indent=4)
+        logger.info("Copilot teams data written locally to %s (S3 skipped)", local_path)
 
     return copilot_teams
 
@@ -248,7 +269,10 @@ def create_dictionary(
 
 def update_s3_object(
-    s3_client: boto3.client, bucket_name: str, object_name: str, data: dict
+    s3_client: boto3.client,
+    bucket_name: str,
+    object_name: str,
+    data: dict,
 ) -> bool:
     """Update an S3 object with new data.
 
@@ -293,12 +317,62 @@ def get_team_history(
     response = gh.get(f"/orgs/{org}/team/{team}/copilot/metrics", params=query_params)
 
     if not isinstance(response, Response):
-        logger.error("Unexpected response type: %s", type(response))
+        # If the response is not a Response object, no Copilot data is available for this team.
+        # We can return None, which is then handled by the calling function.
         return None
 
     return response.json()
 
 
-def handler(event: dict, context) -> str:  # pylint: disable=unused-argument
+def get_dict_value(dictionary: dict, key: str) -> Any:
+    """Gets a value from a dictionary, raising an error if it is missing.
+
+    Args:
+        dictionary (dict): The dictionary to get the value from.
+        key (str): The key to get the value for.
+
+    Raises:
+        ValueError: If the key is not found in the dictionary or its value is None.
+
+    Returns:
+        Any: The value of the key in the dictionary.
+    """
+    value = dictionary.get(key)
+
+    if value is None:
+        raise ValueError(f"Key {key} not found in the dictionary.")
+
+    return value
+
+
+def get_config_file(path: str) -> Any:
+    """Loads a configuration file as a dictionary.
+
+    Args:
+        path (str): The path to the configuration file.
+
+    Raises:
+        FileNotFoundError: If the configuration file is not found.
+        TypeError: If the configuration file does not contain a dictionary.
+
+    Returns:
+        Any: The configuration file as a dictionary.
+ """ + try: + with open(path, encoding="utf-8") as f: + config = json.load(f) + except FileNotFoundError: + error_message = f"{path} configuration file not found. Please check the path." + raise FileNotFoundError(error_message) from None + + if not isinstance(config, dict): + error_message = ( + f"{path} configuration file is not a dictionary. Please check the file contents." + ) + raise TypeError(error_message) + + return config + + +def handler(event: dict, context) -> str: # pylint: disable=unused-argument, too-many-locals """AWS Lambda handler function for GitHub Copilot usage data aggregation. This function: @@ -315,6 +389,25 @@ def handler(event: dict, context) -> str: # pylint: disable=unused-argument Returns: str: Completion message. """ + # Load config file + config = get_config_file("./config/config.json") + + features = get_dict_value(config, "features") + + show_log_locally = get_dict_value(features, "show_log_locally") + + write_data_locally = get_dict_value(features, "write_data_locally") + + # Toggle local logging + if show_log_locally: + # This is a nightmare to test as it's really hard to get to. + # At some point we should look to make a wrapper for logging + # so it can be tested more easily. + logging.basicConfig( + filename="debug.log", + filemode="w", + ) + # Create an S3 client session = boto3.Session() s3 = session.client("s3") @@ -342,10 +435,10 @@ def handler(event: dict, context) -> str: # pylint: disable=unused-argument logger.info("API Controller created") # Copilot Usage Data (Historic) - historic_usage, dates_added = get_and_update_historic_usage(s3, gh) + historic_usage, dates_added = get_and_update_historic_usage(s3, gh, write_data_locally) # GitHub Teams with Copilot Data - copilot_teams = get_and_update_copilot_teams(s3, gh) + copilot_teams = get_and_update_copilot_teams(s3, gh, write_data_locally) logger.info("Getting history of each team identified previously") @@ -359,11 +452,21 @@ def handler(event: dict, context) -> str: # pylint: disable=unused-argument logger.info("Existing team history has %d entries", len(existing_team_history)) - # Convert to dictionary for quick lookup - updated_team_history = create_dictionary(gh, copilot_teams, existing_team_history) - - # Write updated team history to S3 - update_s3_object(s3, BUCKET_NAME, "teams_history.json", updated_team_history) + if not write_data_locally: + # Convert to dictionary for quick lookup + updated_team_history = create_dictionary(gh, copilot_teams, existing_team_history) + + # Write updated team history to S3 + # This line isn't covered by tests as it's painful to get to. + # The function itself is tested though. 
diff --git a/tests/test_main.py b/tests/test_main.py
index f45beab..628f57f 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -18,6 +18,8 @@
+    get_config_file,
+    get_dict_value,
     get_team_history,
     handler,
     update_s3_object,
 )
@@ -70,7 +72,7 @@ def test_get_and_update_copilot_teams_single_page(self, mock_update_s3_object):
         with patch(
             "src.main.get_copilot_team_date", return_value=[{"name": "team1"}]
         ) as mock_get_team_date:
-            result = get_and_update_copilot_teams(s3, gh)
+            result = get_and_update_copilot_teams(s3, gh, False)
             assert result == [{"name": "team1"}]
             mock_get_team_date.assert_called_once_with(gh, 1)
             mock_update_s3_object.assert_called_once()
@@ -93,7 +95,7 @@ def test_get_and_update_copilot_teams_multiple_pages(self, mock_update_s3_object
             "src.main.get_copilot_team_date",
             side_effect=[[{"name": "team1"}], [{"name": "team2"}], [{"name": "team3"}]],
         ) as mock_get_team_date:
-            result = get_and_update_copilot_teams(s3, gh)
+            result = get_and_update_copilot_teams(s3, gh, False)
             assert result == [{"name": "team1"}, {"name": "team2"}, {"name": "team3"}]
             assert mock_get_team_date.call_count == 3
             mock_update_s3_object.assert_called_once()
@@ -107,7 +109,7 @@ def test_get_and_update_copilot_teams_no_teams(self, mock_update_s3_object):
         gh.get.return_value = mock_response
 
         with patch("src.main.get_copilot_team_date", return_value=[]) as mock_get_team_date:
-            result = get_and_update_copilot_teams(s3, gh)
+            result = get_and_update_copilot_teams(s3, gh, False)
             assert result == []
             mock_get_team_date.assert_called_once_with(gh, 1)
             mock_update_s3_object.assert_called_once()
@@ -116,6 +118,22 @@
             assert args[2] == "copilot_teams.json"
             assert args[3] == []
 
+    def test_write_data_locally_creates_file(self):
+        s3 = MagicMock()
+        gh = MagicMock()
+        response = MagicMock()
+        response.links = {}
+        gh.get.return_value = response
+
+        with patch("src.main.get_copilot_team_date", return_value=[{"name": "teamA"}]):
+            with patch("src.main.os.makedirs") as mock_makedirs, \
+                patch("src.main.open", create=True) as mock_open:
+                result = get_and_update_copilot_teams(s3, gh, True)
+                assert result == [{"name": "teamA"}]
+                mock_makedirs.assert_called_once_with("output", exist_ok=True)
+                mock_open.assert_called_once()
+                s3.put_object.assert_not_called()
+
 
 class TestGetTeamHistory:
     def setup_method(self):
@@ -138,17 +156,6 @@ def test_get_team_history_success(self):
         )
         assert result == [{"date": "2024-01-01", "usage": 5}]
 
-    def test_get_team_history_unexpected_response_type(self, caplog):
-        gh = MagicMock()
-        gh.get.return_value = "not_a_response"
-
-        with caplog.at_level("ERROR"):
-            result = get_team_history(gh, "dev-team")
-        assert result is None
-        assert any(
-            "Unexpected response type" in record.getMessage() for record in caplog.records
-        )
-
     def test_get_team_history_with_no_query_params(self):
         gh = MagicMock()
         mock_response = MagicMock(spec=Response)
@@ -370,7 +377,7 @@ def test_get_and_update_historic_usage_success(self):
             )
         }
 
-        result, dates_added = get_and_update_historic_usage(s3, gh)
+        result, dates_added = get_and_update_historic_usage(s3, gh, False)
         assert result == [
             {"date": "2024-01-01", "usage": 10},
             {"date": "2024-01-02", "usage": 20},
@@ -395,7 +402,7 @@ def test_get_and_update_historic_usage_no_existing_data(self, caplog):
             operation_name="GetObject",
         )
 
-        result, dates_added = get_and_update_historic_usage(s3, gh)
+        result, dates_added = get_and_update_historic_usage(s3, gh, False)
         assert result == [{"date": "2024-01-01", "usage": 10}]
         assert dates_added == ["2024-01-01"]
         s3.put_object.assert_called_once()
@@ -418,11 +425,33 @@ def test_get_and_update_historic_usage_no_new_dates(self):
             )
         }
 
-        result, dates_added = get_and_update_historic_usage(s3, gh)
+        result, dates_added = get_and_update_historic_usage(s3, gh, False)
         assert result == [{"date": "2024-01-01", "usage": 10}]
         assert dates_added == []
         s3.put_object.assert_called_once()
 
+    def test_write_data_locally_creates_file(self):
+        s3 = MagicMock()
+        gh = MagicMock()
+        usage_data = [{"date": "2024-01-01", "usage": 10}]
+        gh.get.return_value.json.return_value = usage_data
+
+        # S3 get_object raises ClientError, so there is no existing data
+        s3.get_object.side_effect = ClientError(
+            error_response={"Error": {"Code": "404", "Message": "Not Found"}},
+            operation_name="GetObject",
+        )
+
+        # Patch os.makedirs and open so no real files are written
+        with patch("src.main.os.makedirs") as mock_makedirs, \
+            patch("src.main.open", create=True) as mock_open:
+            result, dates_added = get_and_update_historic_usage(s3, gh, True)
+            assert result == [{"date": "2024-01-01", "usage": 10}]
+            assert dates_added == ["2024-01-01"]
+            mock_makedirs.assert_called_once_with("output", exist_ok=True)
+            mock_open.assert_called_once()
+            s3.put_object.assert_not_called()
+
 
 class TestCreateDictionary:
     def setup_method(self):
@@ -501,3 +530,107 @@ def test_create_dictionary_no_new_history(self, caplog):
             result = create_dictionary(gh, copilot_teams, existing_team_history)
             assert result == []
             assert mock_get_team_history.call_count == 1
+
+
+class TestGetTeamHistoryResponses:
+    def setup_method(self):
+        self.org_patch = patch("src.main.org", "test-org")
+        self.org_patch.start()
+
+    def teardown_method(self):
+        self.org_patch.stop()
+
+    def test_get_team_history_returns_metrics(self):
+        gh = MagicMock()
+        mock_response = MagicMock(spec=Response)
+        mock_response.json.return_value = [{"date": "2024-01-01", "usage": 5}]
+        gh.get.return_value = mock_response
+
+        result = get_team_history(gh, "dev-team", {"since": "2024-01-01"})
+        gh.get.assert_called_once_with(
+            "/orgs/test-org/team/dev-team/copilot/metrics", params={"since": "2024-01-01"}
+        )
+        assert result == [{"date": "2024-01-01", "usage": 5}]
+
+    def test_get_team_history_returns_empty_list(self):
+        gh = MagicMock()
+        mock_response = MagicMock(spec=Response)
+        mock_response.json.return_value = []
+        gh.get.return_value = mock_response
+
+        result = get_team_history(gh, "dev-team")
+        gh.get.assert_called_once_with("/orgs/test-org/team/dev-team/copilot/metrics", params=None)
+        assert result == []
+
+    def test_get_team_history_non_response_returns_none(self):
+        gh = MagicMock()
+        gh.get.return_value = "not_a_response"
+
+        result = get_team_history(gh, "dev-team")
+        gh.get.assert_called_once_with("/orgs/test-org/team/dev-team/copilot/metrics", params=None)
+        assert result is None
+
+    def test_get_team_history_with_query_params_none(self):
+        gh = MagicMock()
+        mock_response = MagicMock(spec=Response)
+        mock_response.json.return_value = [{"date": "2024-01-01", "usage": 5}]
+        gh.get.return_value = mock_response
+
+        result = get_team_history(gh, "dev-team", None)
+        gh.get.assert_called_once_with("/orgs/test-org/team/dev-team/copilot/metrics", params=None)
+        assert result == [{"date": "2024-01-01", "usage": 5}]
+
+
+class TestGetDictValue:
+    def test_get_dict_value_returns_value(self):
+        d = {"foo": "bar", "baz": 42}
+        assert get_dict_value(d, "foo") == "bar"
+        assert get_dict_value(d, "baz") == 42
+
+    def test_get_dict_value_raises_for_missing_key(self):
+        d = {"foo": "bar"}
+        try:
+            get_dict_value(d, "missing")
+        except ValueError as e:
+            assert str(e) == "Key missing not found in the dictionary."
+        else:
+            assert False, "ValueError not raised for missing key"
+
+    def test_get_dict_value_raises_for_none_value(self):
+        d = {"foo": None}
+        try:
+            get_dict_value(d, "foo")
+        except ValueError as e:
+            assert str(e) == "Key foo not found in the dictionary."
+        else:
+            assert False, "ValueError not raised for None value"
+
+
+class TestGetConfigFile:
+    def test_get_config_file_success(self, tmp_path):
+        config_data = {"features": {"show_log_locally": False}}
+        config_path = tmp_path / "config.json"
+        config_path.write_text('{"features": {"show_log_locally": false}}', encoding="utf-8")
+
+        result = get_config_file(str(config_path))
+        assert result == config_data
+
+    def test_get_config_file_file_not_found(self):
+        missing_path = "nonexistent_config.json"
+        try:
+            get_config_file(missing_path)
+        except FileNotFoundError as e:
+            assert missing_path in str(e)
+        else:
+            assert False, "FileNotFoundError not raised"
+
+    def test_get_config_file_not_dict(self, tmp_path):
+        config_path = tmp_path / "config.json"
+        config_path.write_text("[1, 2, 3]", encoding="utf-8")
+
+        try:
+            get_config_file(str(config_path))
+        except TypeError as e:
+            assert "is not a dictionary" in str(e)
+        else:
+            assert False, "TypeError not raised for non-dict config"
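The exception tests above use a `try`/`except`/`else` pattern. Assuming `pytest` is importable in this module (it already supplies the `tmp_path` and `caplog` fixtures), the same checks could be written more idiomatically with `pytest.raises`, which asserts both the exception type and the message in one step. A sketch mirroring two of the tests above:

```python
import pytest

from src.main import get_config_file, get_dict_value


def test_get_dict_value_raises_for_missing_key():
    # pytest.raises fails the test if no ValueError is raised,
    # and `match` checks the message against a regular expression.
    with pytest.raises(ValueError, match="Key missing not found in the dictionary."):
        get_dict_value({"foo": "bar"}, "missing")


def test_get_config_file_file_not_found():
    # The error message includes the offending path, so match on it.
    with pytest.raises(FileNotFoundError, match="nonexistent_config.json"):
        get_config_file("nonexistent_config.json")
```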