diff --git a/app/management/commands/create_apps.py b/app/management/commands/create_apps.py new file mode 100644 index 0000000..ba9a98e --- /dev/null +++ b/app/management/commands/create_apps.py @@ -0,0 +1,147 @@ +import time + +import pandas as pd +from django.contrib.auth.models import User +from django.core.management.base import BaseCommand + +from app.services import AppService +from metric.models import Metric +from polling.services.polling_schedule_service import PollingScheduleService +from source.models import Source + + +class Command(BaseCommand): + help = "Crea les Sources i Metric bàsiques" + + def handle(self, *args, **kwargs): + average_rating = Metric.objects.create( + code="average_rating", + name="Average Rating", + value_type="float", + description="Average user rating of the app.", + ) + total_reviews = Metric.objects.create( + code="total_reviews", + name="Total Reviews", + value_type="integer", + description="Total number of user reviews.", + ) + daily_news_blog_mentions = Metric.objects.create( + code="daily_news_blog_mentions", + name="Daily News Blog Mentions", + value_type="integer", + description="Number of daily mentions in news blogs.", + ) + daily_social_network_mentions = Metric.objects.create( + code="daily_social_network_mentions", + name="Daily Social Network Mentions", + value_type="integer", + description="Number of daily mentions on social networks.", + ) + total_downloads = Metric.objects.create( + code="total_downloads", + name="Total Downloads", + value_type="integer", + description="Total number of app downloads.", + ) + last_update_date = Metric.objects.create( + code="last_update_date", + name="Last Update Date", + value_type="date", + description="Date of the app’s last update.", + ) + Metric.objects.create( + code="bug_rate", + name="Bug Rate", + value_type="float", + description="Proportion of reviews that mention errors" + " or bugs relative to the total number of reviews.", + is_derived=True, + ) + 
Metric.objects.create( + code="positive_rate", + name="Positive Rate", + value_type="float", + description="Proportion of reviews with positive sentiment" + " relative to the total number of reviews.", + is_derived=True, + ) + Metric.objects.create( + code="update_changed", + name="Update Changed", + value_type="integer", + description="Indicates whether the last update date has changed" + " compared to the previous day.\nValue is 1 if it changed, 0 otherwise.", + is_derived=True, + ) + Source.objects.create( + code="itunes", + name="App Store", + type="api", + url="https://itunes.apple.com", + ).metrics.set([average_rating, total_reviews]) + Source.objects.create( + code="google_play", + name="Google Play Scraper", + type="scraper", + url="https://play.google.com", + ).metrics.set([average_rating, total_reviews, total_downloads, last_update_date]) + Source.objects.create( + code="news", + name="News API", + type="api", + url="https://newsapi.org/v2", + ).metrics.set([daily_news_blog_mentions]) + Source.objects.create( + code="reddit", + name="Reddit API", + type="api", + ).metrics.set([daily_social_network_mentions]) + + user, created = User.objects.get_or_create( + username="Anyer", + defaults={ + "email": "anyer@example.com", + "is_superuser": True, + "is_staff": True, + }, + ) + if created: + user.set_password("Anyer123") + user.save() + + service = AppService() + poll_service = PollingScheduleService() + + df_page1 = pd.read_excel("apps_updated_filtered.xlsx", sheet_name="Sheet1") + created_count = 0 + + for index, row in df_page1.iterrows(): + try: + validated_data = { + "name": row["name"], + "appstore_id": ( + str(int(row["apple_store_id"])) + if not pd.isna(row["apple_store_id"]) + else None + ), + "playstore_id": str(row["google_play_id"]), + } + app = service.create_app(validated_data, user) + polling_schedule_metrics = poll_service.get_polling_schedule( + app.id, "metrics", [app.id] + ) + polling_schedule_reviews = poll_service.get_polling_schedule( + 
app.id, "reviews", [app.id] + ) + poll_service.activate_polling_schedule(polling_schedule_metrics) + poll_service.activate_polling_schedule(polling_schedule_reviews) + + created_count = created_count + 1 + time.sleep(0.1) + except Exception as e: + print(f"❌ Error en la fila {index}: {e}") + + self.stdout.write( + self.style.SUCCESS(f"✔ {created_count} apps creadas y polling activado correctamente.") + ) diff --git a/app/serializers.py b/app/serializers.py index 5bc60e5..fd5a156 100644 --- a/app/serializers.py +++ b/app/serializers.py @@ -1,7 +1,5 @@ from rest_framework import serializers -from metric.serializers import MetricDashboardSerializer - from .models import App @@ -36,28 +34,3 @@ class AppCreateSerializer(serializers.ModelSerializer): class Meta: model = App fields = ["name", "description", "appstore_id", "playstore_id"] - - -class MetricHistorySerializer(serializers.Serializer): - date = serializers.DateField() - value = serializers.SerializerMethodField() - - def get_value(self, obj): - value_type = self.context.get("value_type", "string") - - if value_type == "float": - return float(obj["value"]) - elif value_type == "int": - return int(obj["value"]) - else: - return obj["value"] - - -class SourceHistorySerializer(serializers.Serializer): - source = serializers.CharField() - history = MetricHistorySerializer(many=True) - - -class MetricResponseSerializer(serializers.Serializer): - metric = MetricDashboardSerializer() - sources = SourceHistorySerializer(many=True) diff --git a/app/services.py b/app/services.py index 03c7639..ffa7100 100644 --- a/app/services.py +++ b/app/services.py @@ -3,7 +3,6 @@ from django.core.exceptions import ObjectDoesNotExist from rest_framework.exceptions import NotFound -from metric.services import MetricService, MetricValueService from polling.services.polling_schedule_service import PollingScheduleService from source.adapters.google_play_scraper import GooglePlayScraperAdapter from source.adapters.itunes import 
ItunesSearchAPIAdapter @@ -12,10 +11,8 @@ class AppService: - def __init__(self, google_play_adapter=None): + def __init__(self): self.repo = AppRepository() - self.metric_service = MetricService() - self.metric_value_service = MetricValueService() self.polling_schedule_service = PollingScheduleService() self.itunes_adapter = ItunesSearchAPIAdapter() self.google_play_adapter = GooglePlayScraperAdapter() @@ -101,43 +98,3 @@ def update_app(self, instance, validated_data): def delete_app(self, instance): return self.repo.delete(instance) - - def get_metric_dashboard(self, app_id: str, metric_id: str) -> dict: - metric = self.metric_service.get_metric(metric_id) - if not metric.is_derived: - metric_values = self.metric_value_service.get_metric_values_by_app_and_metric( - app_id, metric_id - ) - values = [ - { - "retrieved_at": metric_value.retrieved_at.date(), - "value": metric_value.value, - "source": metric_value.source.name if metric_value.source else "Internal", - } - for metric_value in metric_values - ] - else: - values = self.metric_value_service.get_derived_metric_values_by_app_and_metric( - app_id, metric.code - ) - for value in values: - if "source" not in value: - value["source"] = "Internal" - - sources_data = {} - for value in values: - source_name = value["source"] - sources_data.setdefault(source_name, {"source": source_name, "history": []}) - sources_data[source_name]["history"].append( - {"date": value["retrieved_at"], "value": value["value"]} - ) - - return { - "metric": { - "code": metric.code, - "name": metric.name, - "description": metric.description, - "value_type": metric.value_type, - }, - "sources": list(sources_data.values()), - } diff --git a/app/tests/test_create_apps.py b/app/tests/test_create_apps.py index 82f8113..ff02339 100644 --- a/app/tests/test_create_apps.py +++ b/app/tests/test_create_apps.py @@ -1,3 +1,5 @@ +import datetime + import pytest from rest_framework import status from rest_framework.test import APIClient @@ -6,15 +8,14 
@@ @pytest.mark.django_db -@pytest.mark.usefixtures("dummy_app") +@pytest.mark.usefixtures("dummy_user") class TestAppCreateViewSet: - def test_create_app_success(self, dummy_app, create_default_sources_and_metrics): + def test_create_app_success(self, dummy_user): client = APIClient() - client.force_authenticate(user=dummy_app.user) + client.force_authenticate(user=dummy_user) payload = { "name": "Test App", - "code": "test_app", "appstore_id": "123456789", "playstore_id": "com.example.app", } @@ -22,13 +23,12 @@ def test_create_app_success(self, dummy_app, create_default_sources_and_metrics) response = client.post("/api/apps/", data=payload, format="json") assert response.status_code == status.HTTP_201_CREATED - assert App.objects.filter(name="Test App", user=dummy_app.user).exists() + assert App.objects.filter(name="Test App", user=dummy_user).exists() def test_create_app_unauthenticated(self): client = APIClient() payload = { "name": "Test App", - "code": "test_app", "appstore_id": "123456789", "playstore_id": "com.example.app", } @@ -37,13 +37,13 @@ def test_create_app_unauthenticated(self): assert response.status_code == status.HTTP_401_UNAUTHORIZED - def test_create_app_missing_fields(self, dummy_app): + def test_create_app_missing_fields(self, dummy_user): client = APIClient() - client.force_authenticate(user=dummy_app.user) + client.force_authenticate(user=dummy_user) payload = { # Falta 'name' i altres - "code": "test_app" + "appstore_id": "123456789" } response = client.post("/api/apps/", data=payload, format="json") @@ -52,3 +52,22 @@ def test_create_app_missing_fields(self, dummy_app): assert ( "name" in response.json()["errors"][0].lower() or "name" in str(response.json()).lower() ) + + def test_create_app_with_real_values(self, dummy_user): + client = APIClient() + client.force_authenticate(user=dummy_user) + + payload = { + "name": "Discord", + "appstore_id": "985746746", + "playstore_id": "com.discord", + } + + response = client.post("/api/apps/", 
data=payload, format="json") + + assert response.status_code == status.HTTP_201_CREATED + app = App.objects.get(name="Discord", user=dummy_user) + assert app.developer == "Discord, Inc." + assert app.release_date == datetime.date(2015, 5, 21) + assert app.available_on_ios is True + assert app.available_on_android is True diff --git a/app/views.py b/app/views.py index 43e83e7..602b250 100644 --- a/app/views.py +++ b/app/views.py @@ -1,6 +1,5 @@ from drf_spectacular.utils import OpenApiParameter, OpenApiResponse, extend_schema from rest_framework import status, viewsets -from rest_framework.decorators import action from rest_framework.response import Response from schemas.responses import ( @@ -9,7 +8,7 @@ not_found_response, ) -from .serializers import AppCreateSerializer, AppSerializer, MetricResponseSerializer +from .serializers import AppCreateSerializer, AppSerializer from .services import AppService @@ -118,32 +117,3 @@ def destroy(self, request, pk=None): app = self.service.get_app_by_user(pk, user=request.user) self.service.delete_app(app) return Response(status=status.HTTP_204_NO_CONTENT) - - @extend_schema( - summary="Get app metric dashboard", - description=( - "Retrieves the historical dashboard for a specific metric of a given app.\n\n" - "If the metric is a direct (raw) metric, this endpoint returns its historical values " - "grouped by data source (e.g. App Store, Google Play, or internal sources).\n" - "If the metric is a derived one, it is computed dynamically " - "and presented similarly.\n\n" - "Each source includes a list of historical records, each with a date and value. " - "Derived metrics are labeled as coming from the 'Internal' source." 
- ), - parameters=[ - OpenApiParameter(name="id", required=True, type=int, location="path"), - OpenApiParameter(name="metric_id", required=True, type=int, location="path"), - ], - responses={ - 200: MetricResponseSerializer, - 401: UNAUTHORIZED_RESPONSE, - 404: not_found_response("app", True), - }, - tags=["Apps"], - ) - @action(detail=True, methods=["get"], url_path="metrics/(?P<metric_id>[^/.]+)") - def get_app_metric(self, request, pk=None, metric_id=None): - app = self.service.get_app_by_user(pk, user=request.user) - response_data = self.service.get_metric_dashboard(app_id=app.id, metric_id=metric_id) - serializer = MetricResponseSerializer(response_data) - return Response(serializer.data) diff --git a/apps_updated_filtered.xlsx b/apps_updated_filtered.xlsx new file mode 100644 index 0000000..799b0c8 Binary files /dev/null and b/apps_updated_filtered.xlsx differ diff --git a/conftest.py b/conftest.py index a56dbfa..19e2148 100644 --- a/conftest.py +++ b/conftest.py @@ -94,9 +94,8 @@ def dummy_review(db, dummy_app, dummy_source): ) -@pytest.fixture +@pytest.fixture(autouse=True) def create_default_sources_and_metrics(db): - """Crea sources i mètriques per defecte disponibles a tots els tests.""" itunes = Source.objects.create( code="itunes", name="App Store", type="api", url="https://itunes.apple.com" ) @@ -112,7 +111,6 @@ def create_default_sources_and_metrics(db): MetricCode.AVERAGE_RATING, MetricCode.TOTAL_REVIEWS, MetricCode.TOTAL_DOWNLOADS, - MetricCode.LAST_UPDATE_DATE, MetricCode.DAILY_SOCIAL_NETWORK_MENTIONS, MetricCode.DAILY_NEWS_BLOG_MENTIONS, ]: diff --git a/metric/repositories.py b/metric/repositories.py index f2ea367..f2b4967 100644 --- a/metric/repositories.py +++ b/metric/repositories.py @@ -9,27 +9,24 @@ def get_by_id(self, pk): return Metric.objects.get(id=pk) def create(self, data): - return Metric.objects.create(**data) + sources = data.pop("sources", []) + metric = Metric.objects.create(**data) + if sources: + metric.sources.set(sources) + return metric 
def update(self, instance, data): + sources = data.pop("sources", None) for attr, value in data.items(): setattr(instance, attr, value) instance.save() + if sources is not None: + instance.sources.set(sources) return instance def delete(self, instance): instance.delete() - def add_sources(self, instance, sources_ids): - instance.sources.add(*sources_ids) - instance.save() - return instance - - def remove_sources(self, instance, sources_ids): - instance.sources.remove(*sources_ids) - instance.save() - return instance - def get_by_code(self, code): return Metric.objects.get(code=code) @@ -58,15 +55,6 @@ def get_by_id(self, pk): def create(self, data): return MetricValue.objects.create(**data) - def update(self, instance, data): - for attr, value in data.items(): - setattr(instance, attr, value) - instance.save() - return instance - - def delete(self, instance): - instance.delete() - def get_by_app_and_metric(self, app_id, metric_id): return ( MetricValue.objects.filter(app_id=app_id, metric_id=metric_id) diff --git a/metric/serializers.py b/metric/serializers.py index b156a0e..eb08e2e 100644 --- a/metric/serializers.py +++ b/metric/serializers.py @@ -1,16 +1,20 @@ from rest_framework import serializers +from source.models import Source + from .constants.value_types import MetricValueType from .models import Metric, MetricValue class MetricSerializer(serializers.ModelSerializer): value_type = serializers.ChoiceField(choices=MetricValueType.choices) + sources = serializers.PrimaryKeyRelatedField( + queryset=Source.objects.all(), many=True, required=False + ) class Meta: model = Metric fields = ["id", "code", "name", "description", "value_type", "is_derived", "sources"] - read_only_fields = ["sources"] class MetricValueSerializer(serializers.ModelSerializer): @@ -40,3 +44,28 @@ class MetricDashboardSerializer(serializers.ModelSerializer): class Meta: model = Metric fields = ["code", "name", "description", "value_type"] + + +class 
MetricHistorySerializer(serializers.Serializer): + date = serializers.DateField() + value = serializers.SerializerMethodField() + + def get_value(self, obj): + value_type = self.context.get("value_type", "string") + + if value_type == "float": + return float(obj["value"]) + elif value_type == "int": + return int(obj["value"]) + else: + return obj["value"] + + +class SourceHistorySerializer(serializers.Serializer): + source = serializers.CharField() + history = MetricHistorySerializer(many=True) + + +class MetricResponseSerializer(serializers.Serializer): + metric = MetricDashboardSerializer() + sources = SourceHistorySerializer(many=True) diff --git a/metric/services.py b/metric/services.py index d7c1a0c..05ee57e 100644 --- a/metric/services.py +++ b/metric/services.py @@ -1,6 +1,12 @@ +import csv +from collections import defaultdict +from io import StringIO +from typing import List + from django.core.exceptions import ObjectDoesNotExist from rest_framework.exceptions import NotFound +from review.models import ReviewPolarity, ReviewType from review.services import ReviewService from .constants import MetricCode @@ -29,12 +35,6 @@ def update_metric(self, instance, validated_data): def delete_metric(self, instance): return self.repo.delete(instance) - def add_sources(self, instance, sources_ids): - return self.repo.add_sources(instance, sources_ids) - - def remove_sources(self, instance, sources_ids): - return self.repo.remove_sources(instance, sources_ids) - def get_metric_by_code(self, code): try: return self.repo.get_by_code(code) @@ -51,28 +51,51 @@ def __init__(self): def list_metric_values(self, filters=None): return self.repo.get_all(filters) - def get_metric_value(self, pk): - try: - return self.repo.get_by_id(pk) - except ObjectDoesNotExist: - raise NotFound(f"The metric_value with ID '{pk}' is not registered.") - def create_metric_value(self, validated_data): return self.repo.create(validated_data) - def update_metric_value(self, instance, validated_data): 
- return self.repo.update(instance, validated_data) - - def delete_metric_value(self, instance): - return self.repo.delete(instance) - - def get_metric_values_by_app_and_metric(self, app_id, metric_id): - return self.repo.get_by_app_and_metric(app_id, metric_id) - - def get_derived_metric_values_by_app_and_metric(self, app_id, metric_code): - reviews = self.review_service.list_reviews(filters={"app_id": app_id}) + def get_metric_dashboard(self, app_id: str, metric_id: str, authorized_apps: List[int]) -> dict: + if app_id not in authorized_apps: + raise NotFound(f"The app with ID '{app_id}' is not registered or not authorized.") + metric = self.metric_service.get_metric(metric_id) + if not metric.is_derived: + metric_values = self.repo.get_by_app_and_metric(app_id, metric_id) + values = [ + { + "retrieved_at": metric_value.retrieved_at.date(), + "value": metric_value.value, + "source": metric_value.source.name if metric_value.source else "Internal", + } + for metric_value in metric_values + ] + else: + values = self._get_derived_metric_values_by_app_and_metric(app_id, metric.code) + for value in values: + if "source" not in value: + value["source"] = "Internal" + + sources_data = {} + for value in values: + source_name = value["source"] + sources_data.setdefault(source_name, {"source": source_name, "history": []}) + sources_data[source_name]["history"].append( + {"date": value["retrieved_at"], "value": value["value"]} + ) + + return { + "metric": { + "code": metric.code, + "name": metric.name, + "description": metric.description, + "value_type": metric.value_type, + }, + "sources": list(sources_data.values()), + } + + def _get_derived_metric_values_by_app_and_metric(self, app_id, metric_code): derived_values = [] if metric_code == MetricCode.BUG_RATE: + reviews = self.review_service.list_reviews(filters={"app_id": app_id}) total_reviews = {} bug_reviews = {} @@ -82,7 +105,7 @@ def get_derived_metric_values_by_app_and_metric(self, app_id, metric_code): key = (date, 
source) total_reviews[key] = total_reviews.get(key, 0) + 1 - if review.type == "Bug": + if review.type == ReviewType.BUG: bug_reviews[key] = bug_reviews.get(key, 0) + 1 for key in total_reviews: @@ -93,7 +116,10 @@ def get_derived_metric_values_by_app_and_metric(self, app_id, metric_code): derived_values.append({"retrieved_at": date, "value": frequency, "source": source}) elif metric_code == MetricCode.POSITIVE_RATE: + reviews = self.review_service.list_reviews(filters={"app_id": app_id}) polarity_counts = {} + positive = ReviewPolarity.POSITIVE + negative = ReviewPolarity.NEGATIVE for review in reviews: date = review.date.strftime("%Y-%m-%d") @@ -101,42 +127,79 @@ def get_derived_metric_values_by_app_and_metric(self, app_id, metric_code): key = (date, source) if key not in polarity_counts: - polarity_counts[key] = {"positive": 0, "negative": 0} + polarity_counts[key] = {positive: 0, negative: 0} - if review.polarity in ["positive", "negative"]: + if review.polarity in [positive, negative]: polarity_counts[key][review.polarity] += 1 for key, counts in polarity_counts.items(): date, source = key - total = counts["positive"] + counts["negative"] - frequency = counts["positive"] / total if total > 0 else 0 + total = counts[positive] + counts[negative] + frequency = counts[positive] / total if total > 0 else 0 derived_values.append({"retrieved_at": date, "value": frequency, "source": source}) elif metric_code == MetricCode.UPDATE_CHANGED: metric = self.metric_service.get_metric_by_code(MetricCode.LAST_UPDATE_DATE) last_update_values = self.repo.get_by_app_and_metric(app_id, metric.id) grouped_by_source = {} + for value in last_update_values: source_name = value.source.name if value.source else "Internal" grouped_by_source.setdefault(source_name, []).append(value) - # Tractem cada font independentment for source, values in grouped_by_source.items(): - previous_value = None + # Agrupem per data de recollida + grouped_by_date = defaultdict(list) for value in values: + 
retrieved_date = value.retrieved_at.date() + grouped_by_date[retrieved_date].append(value) + previous_update_date = None + for retrieved_date in sorted(grouped_by_date.keys()): + group = grouped_by_date[retrieved_date] + # Valorem el primer LAST_UPDATE_DATE del dia com a representatiu current_update_date = ( - value.value.date() if hasattr(value.value, "date") else value.value + group[0].value.date() if hasattr(group[0].value, "date") else group[0].value ) + change_detected = 0 - if previous_value is not None and current_update_date != previous_value: + if ( + previous_update_date is not None + and current_update_date != previous_update_date + ): change_detected = 1 - derived_values.append( - { - "retrieved_at": value.retrieved_at.strftime("%Y-%m-%d"), - "value": change_detected, - "source": source, - } - ) - previous_value = current_update_date + + for value in group: + derived_values.append( + { + "retrieved_at": value.retrieved_at.strftime("%Y-%m-%d"), + "value": change_detected, + "source": source, + } + ) + previous_update_date = current_update_date return derived_values + + def get_metrics_csv(self, app_id, authorized_apps): + if int(app_id) not in authorized_apps: + raise NotFound(f"The app with ID '{app_id}' is not registered or not authorized.") + + output = StringIO() + writer = csv.writer(output) + writer.writerow(["metric_code", "source", "retrieved_at", "value"]) + + all_metrics = self.metric_service.list_metrics() + for metric in all_metrics: + dashboard = self.get_metric_dashboard(app_id, metric.id, authorized_apps) + for source in dashboard["sources"]: + for history in source["history"]: + writer.writerow( + [ + dashboard["metric"]["code"], + source["source"], + history["date"], + history["value"], + ] + ) + + return output.getvalue() diff --git a/metric/tests/test_create_metrics.py b/metric/tests/test_create_metrics.py index e0eaf47..a66b79b 100644 --- a/metric/tests/test_create_metrics.py +++ b/metric/tests/test_create_metrics.py @@ -70,3 +70,40 
@@ def test_create_metric_missing_fields(self, dummy_superuser): assert ( "name" in response.json()["errors"][0].lower() or "name" in str(response.json()).lower() ) + + def test_create_metric_with_source(self, dummy_superuser, dummy_source): + client = APIClient() + client.force_authenticate(user=dummy_superuser) + + payload = { + "code": "test_metric", + "name": "Test Metric", + "value_type": "integer", + "description": "A test metric.", + "is_derived": False, + "sources": [dummy_source.id], + } + + response = client.post("/api/metrics/", data=payload, format="json") + + assert response.status_code == status.HTTP_201_CREATED + assert Metric.objects.filter(name="Test Metric").exists() + + def test_create_metric_with_nonexistent_source(self, dummy_superuser): + client = APIClient() + client.force_authenticate(user=dummy_superuser) + + payload = { + "code": "test_metric", + "name": "Test Metric", + "value_type": "integer", + "description": "A test metric.", + "is_derived": False, + "sources": [9999], + } + + response = client.post("/api/metrics/", data=payload, format="json") + + assert response.status_code == status.HTTP_400_BAD_REQUEST + assert ("errors") in response.data + assert "Invalid pk" in response.data["errors"][0] diff --git a/metric/tests/test_derived_metric_bug_rate.py b/metric/tests/test_derived_metric_bug_rate.py new file mode 100644 index 0000000..09417e2 --- /dev/null +++ b/metric/tests/test_derived_metric_bug_rate.py @@ -0,0 +1,42 @@ +import datetime + +import pytest +from django.utils import timezone + +from metric.constants import MetricCode +from metric.services import MetricValueService +from review.models import Review, ReviewPolarity, ReviewType +from source.models import Source + + +@pytest.mark.django_db +def test_update_changed_multiple_values_same_date(dummy_app): + source = Source.objects.get(code="google_play") + Review.objects.create( + app=dummy_app, + source=source, + content="Test not bug review", + 
date=timezone.make_aware(datetime.datetime(2023, 6, 1)), + polarity=ReviewPolarity.POSITIVE, + type=ReviewType.FEATURE, + ) + Review.objects.create( + app=dummy_app, + source=source, + content="Test bug review", + date=timezone.make_aware(datetime.datetime(2023, 6, 1)), + polarity=ReviewPolarity.POSITIVE, + type=ReviewType.BUG, + ) + + expected_output = [ + {"retrieved_at": "2023-06-01", "value": 0.5, "source": "Google Play"}, + ] + + service = MetricValueService() + output = service._get_derived_metric_values_by_app_and_metric( + app_id=dummy_app.id, + metric_code=MetricCode.BUG_RATE, + ) + + assert output == expected_output diff --git a/metric/tests/test_derived_metric_positive_rate.py b/metric/tests/test_derived_metric_positive_rate.py new file mode 100644 index 0000000..1ef7826 --- /dev/null +++ b/metric/tests/test_derived_metric_positive_rate.py @@ -0,0 +1,42 @@ +import datetime + +import pytest +from django.utils import timezone + +from metric.constants import MetricCode +from metric.services import MetricValueService +from review.models import Review, ReviewPolarity, ReviewType +from source.models import Source + + +@pytest.mark.django_db +def test_update_changed_multiple_values_same_date(dummy_app): + source = Source.objects.get(code="google_play") + Review.objects.create( + app=dummy_app, + source=source, + content="Test positive review", + date=timezone.make_aware(datetime.datetime(2023, 6, 10)), + polarity=ReviewPolarity.POSITIVE, + type=ReviewType.FEATURE, + ) + Review.objects.create( + app=dummy_app, + source=source, + content="Test negative review", + date=timezone.make_aware(datetime.datetime(2023, 6, 10)), + polarity=ReviewPolarity.NEGATIVE, + type=ReviewType.FEATURE, + ) + + expected_output = [ + {"retrieved_at": "2023-06-10", "value": 0.5, "source": "Google Play"}, + ] + + service = MetricValueService() + output = service._get_derived_metric_values_by_app_and_metric( + app_id=dummy_app.id, + metric_code=MetricCode.POSITIVE_RATE, + ) + + assert 
output == expected_output diff --git a/metric/tests/test_derived_metric_update_changed.py b/metric/tests/test_derived_metric_update_changed.py new file mode 100644 index 0000000..b5dfddf --- /dev/null +++ b/metric/tests/test_derived_metric_update_changed.py @@ -0,0 +1,82 @@ +import datetime + +import pytest +from django.utils import timezone + +from metric.constants import MetricCode +from metric.models import Metric, MetricValue +from metric.services import MetricValueService +from source.models import Source + + +@pytest.mark.django_db +def test_update_changed_multiple_values_same_date(dummy_app): + # Creem la mètrica base: LAST_UPDATE_DATE + metric = Metric.objects.create( + code=MetricCode.LAST_UPDATE_DATE, + name="Last Update Date", + description="The last date the app was updated.", + value_type="date", + is_derived=False, + ) + + source = Source.objects.get(code="google_play") + + # Afegim múltiples valors de la mateixa data de recollida + MetricValue.objects.bulk_create( + [ + MetricValue( + app=dummy_app, + metric=metric, + value=timezone.make_aware(datetime.datetime(2023, 6, 15)), + source=source, + retrieved_at=timezone.make_aware(datetime.datetime(2023, 6, 1)), + ), + MetricValue( + app=dummy_app, + metric=metric, + value=timezone.make_aware(datetime.datetime(2023, 6, 15)), + source=source, + retrieved_at=timezone.make_aware(datetime.datetime(2023, 6, 1)), + ), + MetricValue( + app=dummy_app, + metric=metric, + value=timezone.make_aware(datetime.datetime(2023, 6, 16)), + source=source, + retrieved_at=timezone.make_aware(datetime.datetime(2023, 6, 2)), + ), + MetricValue( + app=dummy_app, + metric=metric, + value=timezone.make_aware(datetime.datetime(2023, 6, 16)), + source=source, + retrieved_at=timezone.make_aware(datetime.datetime(2023, 6, 2)), + ), + MetricValue( + app=dummy_app, + metric=metric, + value=timezone.make_aware(datetime.datetime(2023, 6, 16)), + source=source, + retrieved_at=timezone.make_aware(datetime.datetime(2023, 6, 3)), + ), + ] + 
) + + expected_output = [ + {"retrieved_at": "2023-06-01", "value": 0, "source": "Google Play"}, + {"retrieved_at": "2023-06-01", "value": 0, "source": "Google Play"}, + {"retrieved_at": "2023-06-02", "value": 1, "source": "Google Play"}, + {"retrieved_at": "2023-06-02", "value": 1, "source": "Google Play"}, + {"retrieved_at": "2023-06-03", "value": 0, "source": "Google Play"}, + ] + + # Calculem la mètrica derivada UPDATE_CHANGED + service = MetricValueService() + output = service._get_derived_metric_values_by_app_and_metric( + app_id=dummy_app.id, + metric_code=MetricCode.UPDATE_CHANGED, + ) + + # Comprovem que el comportament sigui l'esperat + assert output == expected_output, f"Expected {expected_output}, but got {output}" diff --git a/app/tests/test_get_app_metric.py b/metric/tests/test_get_app_metric.py similarity index 90% rename from app/tests/test_get_app_metric.py rename to metric/tests/test_get_app_metric.py index d291cf3..66439b7 100644 --- a/app/tests/test_get_app_metric.py +++ b/metric/tests/test_get_app_metric.py @@ -4,7 +4,6 @@ @pytest.mark.django_db -@pytest.mark.usefixtures("dummy_app", "dummy_metric") class TestGetAppMetricDashboard: def test_get_metric_dashboard_success(self, dummy_app, dummy_metric): client = APIClient() @@ -16,7 +15,9 @@ def test_get_metric_dashboard_success(self, dummy_app, dummy_metric): assert response.status_code == status.HTTP_200_OK assert "sources" in response.json() - def test_get_metric_dashboard_unauthenticated(self, dummy_app, dummy_metric): + def test_get_metric_dashboard_unauthenticated( + self, dummy_app, dummy_metric, dummy_metric_value + ): client = APIClient() url = f"/api/apps/{dummy_app.id}/metrics/{dummy_metric.id}/" response = client.get(url) diff --git a/metric/tests/test_get_metrics_csv.py b/metric/tests/test_get_metrics_csv.py new file mode 100644 index 0000000..44b77bc --- /dev/null +++ b/metric/tests/test_get_metrics_csv.py @@ -0,0 +1,29 @@ +import pytest +from rest_framework import status +from 
rest_framework.test import APIClient + + +@pytest.mark.django_db +class TestExportMetricsCSV: + def test_export_metrics_success(self, dummy_app, dummy_metric_value): + client = APIClient() + client.force_authenticate(user=dummy_app.user) + + response = client.get(f"/api/apps/{dummy_app.id}/metrics/csv/") + + assert response.status_code == status.HTTP_200_OK + assert response["Content-Type"] == "text/csv" + assert "metric_code" in response.content.decode() + + def test_export_metrics_unauthenticated(self, dummy_app): + client = APIClient() + + response = client.get(f"/api/apps/{dummy_app.id}/metrics/csv/") + assert response.status_code == status.HTTP_401_UNAUTHORIZED + + def test_export_metrics_app_not_found(self, dummy_app): + client = APIClient() + client.force_authenticate(user=dummy_app.user) + + response = client.get("/api/apps/999999/metrics/csv/") + assert response.status_code == status.HTTP_404_NOT_FOUND diff --git a/metric/tests/test_update_metrics.py b/metric/tests/test_update_metrics.py index 9d02540..9128dbe 100644 --- a/metric/tests/test_update_metrics.py +++ b/metric/tests/test_update_metrics.py @@ -58,3 +58,40 @@ def test_update_metric_invalid_data(self, dummy_metric, dummy_superuser): response = client.put(f"/api/metrics/{dummy_metric.id}/", data=payload, format="json") assert response.status_code == status.HTTP_400_BAD_REQUEST assert "name" in str(response.json()).lower() + + def test_update_metric_with_source(self, dummy_metric, dummy_superuser, dummy_source): + client = APIClient() + client.force_authenticate(user=dummy_superuser) + + payload = { + "code": dummy_metric.code, + "name": "Updated Name", + "value_type": "float", + "description": "New description", + "is_derived": False, + "sources": [dummy_source.id], + } + + response = client.put(f"/api/metrics/{dummy_metric.id}/", data=payload, format="json") + assert response.status_code == status.HTTP_200_OK, f"Errors: {response.json()}" + + assert response.json()["name"] == "Updated Name" + 
assert response.json()["description"] == "New description" + + def test_update_metric_with_nonexistent_source(self, dummy_metric, dummy_superuser): + client = APIClient() + client.force_authenticate(user=dummy_superuser) + + payload = { + "code": dummy_metric.code, + "name": "Updated Name", + "value_type": "float", + "description": "New description", + "is_derived": False, + "sources": [9999], + } + + response = client.put(f"/api/metrics/{dummy_metric.id}/", data=payload, format="json") + assert response.status_code == status.HTTP_400_BAD_REQUEST + assert ("errors") in response.data + assert "Invalid pk" in response.data["errors"][0] diff --git a/metric/urls.py b/metric/urls.py index 9891c6c..7f3091d 100644 --- a/metric/urls.py +++ b/metric/urls.py @@ -9,4 +9,14 @@ urlpatterns = [ path("", include(router.urls)), + path( + "apps//metrics//", + MetricValueViewSet.as_view({"get": "get_app_metric"}), + name="get-app-metric", + ), + path( + "apps//metrics/csv/", + MetricValueViewSet.as_view({"get": "get_metrics_csv"}), + name="get-metrics-csv", + ), ] diff --git a/metric/views.py b/metric/views.py index 7891780..ada1438 100644 --- a/metric/views.py +++ b/metric/views.py @@ -1,6 +1,6 @@ +from django.http import HttpResponse from drf_spectacular.utils import OpenApiParameter, OpenApiResponse, extend_schema from rest_framework import status, viewsets -from rest_framework.decorators import action from rest_framework.response import Response from permissions.decorators import superuser_required @@ -10,9 +10,8 @@ UNAUTHORIZED_RESPONSE, not_found_response, ) -from source.serializers import LinkSourceSerializer -from .serializers import MetricSerializer, MetricValueSerializer +from .serializers import MetricResponseSerializer, MetricSerializer, MetricValueSerializer from .services import MetricService, MetricValueService @@ -106,7 +105,7 @@ def create(self, request): 400: BAD_REQUEST_RESPONSE, 401: UNAUTHORIZED_RESPONSE, 403: FORBIDDEN_RESPONSE, - 404: 
not_found_response("metrric", False), + 404: not_found_response("metric", False), }, tags=["Metrics"], ) @@ -126,7 +125,7 @@ def update(self, request, pk=None): 204: OpenApiResponse(description="No Content – The metric was successfully deleted."), 401: UNAUTHORIZED_RESPONSE, 403: FORBIDDEN_RESPONSE, - 404: not_found_response("metrric", False), + 404: not_found_response("metric", False), }, tags=["Metrics"], ) @@ -136,73 +135,6 @@ def destroy(self, request, pk=None): self.service.delete_metric(metric) return Response(status=status.HTTP_204_NO_CONTENT) - @extend_schema( - summary="Add sources to a metric", - description=( - "Adds a list of source IDs to the metric without removing existing sources.\n\n" - "The input must be a JSON object with a `sources` field containing a list of IDs.\n\n" - "**Example**:\n" - '`{ "sources": [1, 2, 3] }`' - ), - request=LinkSourceSerializer, - responses={ - 200: MetricSerializer, - 400: BAD_REQUEST_RESPONSE, - 401: UNAUTHORIZED_RESPONSE, - 403: FORBIDDEN_RESPONSE, - 404: not_found_response("metrric", False), - }, - tags=["Metrics"], - ) - @superuser_required - @action(detail=True, methods=["post"], url_path="sources") - def add_sources(self, request, pk=None): - sources_ids = request.data.get("sources", []) - - if not isinstance(sources_ids, list): - return Response( - {"error": "sources must be a list of IDs"}, status=status.HTTP_400_BAD_REQUEST - ) - - metric = self.service.get_metric(pk) - metric = self.service.add_sources(metric, sources_ids) - - return Response(MetricSerializer(metric).data, status=status.HTTP_200_OK) - - @extend_schema( - summary="Remove sources from a metric", - description=( - "Removes a list of source IDs from the metric.\n\n" - "The input must be a JSON object with a `sources` field containing a list of IDs.\n\n" - "**Example**:\n" - '`{ "sources": [2, 3] }`' - ), - request=LinkSourceSerializer, - responses={ - 200: MetricSerializer, - 400: BAD_REQUEST_RESPONSE, - 401: UNAUTHORIZED_RESPONSE, - 403: 
FORBIDDEN_RESPONSE, - 404: not_found_response("metrric", False), - }, - tags=["Metrics"], - methods=["DELETE"], - ) - @superuser_required - @action(detail=True, methods=["delete"], url_path="sources") - def remove_sources(self, request, pk=None): - sources_ids = request.data.get("sources", []) - - if not isinstance(sources_ids, list): - return Response( - {"error": "sources must be a list of IDs"}, status=status.HTTP_400_BAD_REQUEST - ) - - metric = self.service.get_metric(pk) - metric = self.service.remove_sources(metric, sources_ids) - - return Response(MetricSerializer(metric).data, status=status.HTTP_200_OK) - class MetricValueViewSet(viewsets.ViewSet): service = MetricValueService() @@ -251,3 +183,56 @@ def list(self, request): metrics = self.service.list_metric_values(filters) serializer = MetricValueSerializer(metrics, many=True) return Response(serializer.data) + + @extend_schema( + summary="Get app metric dashboard", + description=( + "Retrieves the historical dashboard for a specific metric of a given app.\n\n" + "If the metric is a direct (raw) metric, this endpoint returns its historical values " + "grouped by data source (e.g. App Store, Google Play, or internal sources).\n" + "If the metric is a derived one, it is computed dynamically " + "and presented similarly.\n\n" + "Each source includes a list of historical records, each with a date and value. " + "Derived metrics are labeled as coming from the 'Internal' source." 
+ ), + parameters=[ + OpenApiParameter(name="id", required=True, type=int, location="path"), + OpenApiParameter(name="metric_id", required=True, type=int, location="path"), + ], + responses={ + 200: MetricValueSerializer, + 401: UNAUTHORIZED_RESPONSE, + 404: not_found_response("app", True), + }, + tags=["Metrics"], + methods=["get"], + ) + def get_app_metric(self, request, id=None, metric_id=None): + user_authorized_app_ids = request.user.apps.values_list("id", flat=True) + response_data = self.service.get_metric_dashboard( + id, metric_id=metric_id, authorized_apps=user_authorized_app_ids + ) + serializer = MetricResponseSerializer(response_data) + return Response(serializer.data) + + @extend_schema( + summary="Export all metric values as CSV", + description=( + "Exports all metric values of an app in CSV format.\n\n" + "The response will contain a CSV file with the metric code," + " source, date of retrieval and value." + ), + parameters=[ + OpenApiParameter(name="id", required=True, type=int, location="path"), + ], + responses={200: {"type": "string", "format": "binary"}}, + tags=["Metrics"], + methods=["get"], + ) + def get_metrics_csv(self, request, id=None): + authorized_apps = request.user.apps.values_list("id", flat=True) + csv_data = self.service.get_metrics_csv(id, authorized_apps) + + response = HttpResponse(csv_data, content_type="text/csv") + response["Content-Disposition"] = "attachment; filename=metrics_export.csv" + return response diff --git a/polling/migrations/0003_alter_pollingschedule_poll_type.py b/polling/migrations/0003_alter_pollingschedule_poll_type.py new file mode 100644 index 0000000..2bc0454 --- /dev/null +++ b/polling/migrations/0003_alter_pollingschedule_poll_type.py @@ -0,0 +1,22 @@ +# Generated by Django 5.1.7 on 2025-06-19 11:12 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("polling", "0002_remove_pollingschedule_start_at"), + ] + + operations = [ + 
migrations.AlterField( + model_name="pollingschedule", + name="poll_type", + field=models.CharField( + choices=[("metrics", "Metrics only"), ("reviews", "Reviews only")], + default="metrics", + max_length=10, + ), + ), + ] diff --git a/polling/models.py b/polling/models.py index 92a9619..1e73826 100644 --- a/polling/models.py +++ b/polling/models.py @@ -12,7 +12,6 @@ class PollingSchedule(models.Model): POLL_TYPE_CHOICES = [ ("metrics", "Metrics only"), ("reviews", "Reviews only"), - ("both", "Metrics + Reviews"), ] poll_type = models.CharField(max_length=10, choices=POLL_TYPE_CHOICES, default="metrics") diff --git a/polling/repositories.py b/polling/repositories.py index 37bd042..7039f79 100644 --- a/polling/repositories.py +++ b/polling/repositories.py @@ -6,10 +6,6 @@ class PollingRepository: - def get_all(self, poll_type): - queryset = PollingSchedule.objects.filter(poll_type=poll_type) - return queryset - def get_by_app_id(self, app_id, poll_type): return PollingSchedule.objects.get(app_id=app_id, poll_type=poll_type) @@ -44,11 +40,6 @@ def create_periodic_task(self, schedule, app_id, poll_type, task): ) return task - def delete(self, instance): - if instance.periodic_task: - instance.periodic_task.delete() - instance.delete() - def exists(self, app_id, poll_type): return PollingSchedule.objects.filter(app_id=app_id, poll_type=poll_type).exists() diff --git a/polling/services/polling_schedule_service.py b/polling/services/polling_schedule_service.py index 75f6202..7f390e9 100644 --- a/polling/services/polling_schedule_service.py +++ b/polling/services/polling_schedule_service.py @@ -11,10 +11,9 @@ class PollingScheduleService: def __init__(self): self.repo = PollingRepository() - def list_polling_schedules(self, poll_type=None): - return self.repo.get_all(poll_type) - - def get_polling_schedule(self, app_id, poll_type=None): + def get_polling_schedule(self, app_id, poll_type, authorized_app_ids): + if app_id not in authorized_app_ids: + raise NotFound(f"The 
app with ID '{app_id}' is not registered.") try: return self.repo.get_by_app_id(app_id, poll_type) except ObjectDoesNotExist: @@ -34,9 +33,6 @@ def create_polling_schedule(self, app_id, interval_hours=None, poll_type=None): return schedule - def delete_schedule(self, instance): - return self.repo.delete(instance) - def update_polling_schedule(self, polling_schedule, validated_data): if validated_data.get("interval_hours") is not None: interval_schedule = self.repo.get_or_create_interval_schedule( @@ -67,10 +63,13 @@ def deactivate_polling_schedule(self, polling_schedule): self.repo.deactivate(polling_schedule) return polling_schedule - def poll_reviews(self, app_id, date_from, date_to): + def manual_poll_reviews(self, app_id, date_from, date_to, authorized_app_ids=None): + if authorized_app_ids is not None and app_id not in authorized_app_ids: + raise NotFound(f"The app with ID '{app_id}' is not registered.") + try: return run_polling_task.delay(app_id, "reviews", date_from, date_to) except (RedisConnectionError, KombuOperationalError): raise APIException( - "No se pudo conectar con el servicio de tareas (Redis). Inténtalo más tarde." + "Could not connect to the task service (Redis). Please try again later." 
) diff --git a/polling/tests/test_activate_polling.py b/polling/tests/test_activate_polling.py index 3649d67..391b832 100644 --- a/polling/tests/test_activate_polling.py +++ b/polling/tests/test_activate_polling.py @@ -8,7 +8,6 @@ @patch("polling.services.polling_schedule_service.run_polling_task.delay") def test_activate_polling_schedule_with_valid_interval( mock_run_task, - create_default_sources_and_metrics, create_polling_schedule, ): client = APIClient() @@ -27,9 +26,7 @@ def test_activate_polling_schedule_with_valid_interval( @pytest.mark.django_db -def test_activate_polling_schedule_with_invalid_interval( - create_default_sources_and_metrics, create_polling_schedule -): +def test_activate_polling_schedule_with_invalid_interval(create_polling_schedule): client = APIClient() schedule = create_polling_schedule(poll_type="reviews", interval_hours=6) @@ -43,9 +40,7 @@ def test_activate_polling_schedule_with_invalid_interval( @pytest.mark.django_db -def test_activate_polling_schedule_already_active_returns_conflict( - create_default_sources_and_metrics, create_polling_schedule -): +def test_activate_polling_schedule_already_active_returns_conflict(create_polling_schedule): client = APIClient() schedule = create_polling_schedule(poll_type="metrics", interval_hours=6) diff --git a/polling/tests/test_deactivate_polling.py b/polling/tests/test_deactivate_polling.py index 43da444..0ebecce 100644 --- a/polling/tests/test_deactivate_polling.py +++ b/polling/tests/test_deactivate_polling.py @@ -3,9 +3,7 @@ @pytest.mark.django_db -def test_deactivate_polling_schedule_success( - create_polling_schedule, create_default_sources_and_metrics -): +def test_deactivate_polling_schedule_success(create_polling_schedule): client = APIClient() schedule = create_polling_schedule(poll_type="metrics", interval_hours=6) schedule.is_active = True @@ -21,9 +19,7 @@ def test_deactivate_polling_schedule_success( @pytest.mark.django_db -def test_deactivate_polling_schedule_reviews( - 
create_polling_schedule, create_default_sources_and_metrics -): +def test_deactivate_polling_schedule_reviews(create_polling_schedule): client = APIClient() schedule = create_polling_schedule(poll_type="reviews", interval_hours=8) schedule.is_active = True @@ -39,9 +35,7 @@ def test_deactivate_polling_schedule_reviews( @pytest.mark.django_db -def test_deactivate_polling_schedule_already_inactive_returns_conflict( - create_polling_schedule, create_default_sources_and_metrics -): +def test_deactivate_polling_schedule_already_inactive_returns_conflict(create_polling_schedule): client = APIClient() schedule = create_polling_schedule(poll_type="metrics", interval_hours=6) schedule.is_active = False @@ -57,9 +51,7 @@ def test_deactivate_polling_schedule_already_inactive_returns_conflict( @pytest.mark.django_db -def test_deactivate_polling_schedule_not_found_returns_404( - dummy_user, create_default_sources_and_metrics -): +def test_deactivate_polling_schedule_not_found_returns_404(dummy_user): client = APIClient() client.force_authenticate(user=dummy_user) @@ -70,9 +62,7 @@ def test_deactivate_polling_schedule_not_found_returns_404( @pytest.mark.django_db -def test_deactivate_polling_schedule_requires_authentication( - create_polling_schedule, create_default_sources_and_metrics -): +def test_deactivate_polling_schedule_requires_authentication(create_polling_schedule): client = APIClient() schedule = create_polling_schedule(poll_type="metrics") schedule.is_active = True diff --git a/polling/tests/test_manual_review_polling.py b/polling/tests/test_manual_review_polling.py index ba2d388..2cd86c6 100644 --- a/polling/tests/test_manual_review_polling.py +++ b/polling/tests/test_manual_review_polling.py @@ -6,9 +6,7 @@ @pytest.mark.django_db @patch("polling.services.polling_schedule_service.run_polling_task.delay") -def test_manual_review_polling_success( - mock_run_task, create_polling_schedule, create_default_sources_and_metrics -): +def 
test_manual_review_polling_success(mock_run_task, create_polling_schedule): client = APIClient() schedule = create_polling_schedule(poll_type="reviews", interval_hours=6) @@ -27,9 +25,7 @@ def test_manual_review_polling_success( @pytest.mark.django_db -def test_manual_review_polling_unauthenticated_returns_401( - create_polling_schedule, create_default_sources_and_metrics -): +def test_manual_review_polling_unauthenticated_returns_401(create_polling_schedule): client = APIClient() schedule = create_polling_schedule(poll_type="reviews", interval_hours=6) diff --git a/polling/tests/test_poll_metrics.py b/polling/tests/test_poll_metrics.py index de9f7da..667d64f 100644 --- a/polling/tests/test_poll_metrics.py +++ b/polling/tests/test_poll_metrics.py @@ -17,9 +17,7 @@ def test_poll_metrics_calls_fetch_and_saves_values( mock_get_app, mock_create_metric_value, dummy_app, - create_default_sources_and_metrics, ): - # Arrange dummy_app.id = 1 mock_get_app.return_value = dummy_app @@ -36,10 +34,8 @@ def test_poll_metrics_calls_fetch_and_saves_values( polling_service = PollingExecutionService() - # Act polling_service.poll_metrics(app_id=dummy_app.id) - # Assert mock_adapter.fetch.assert_called_once_with(dummy_app, [MetricCode.AVERAGE_RATING]) mock_create_metric_value.assert_called_once() args, kwargs = mock_create_metric_value.call_args diff --git a/polling/tests/test_poll_reviews.py b/polling/tests/test_poll_reviews.py index 3e2ff2c..872abbd 100644 --- a/polling/tests/test_poll_reviews.py +++ b/polling/tests/test_poll_reviews.py @@ -22,12 +22,11 @@ def test_poll_reviews_saves_analyzed_reviews( mock_get_existing_ids, mock_save_reviews, dummy_app, - create_default_sources_and_metrics, ): # Setup mocks mock_get_app.return_value = dummy_app mock_get_existing_ids.return_value = [] - mock_save_reviews.return_value = 1 # 🔧 Aquí és on cal indicar-ho + mock_save_reviews.return_value = 1 mock_adapter = MagicMock() mock_adapter.name = "Reddit" @@ -40,10 +39,8 @@ def 
test_poll_reviews_saves_analyzed_reviews( polling_execution_service = PollingExecutionService() - # Act results = polling_execution_service.poll_reviews(app_id=dummy_app.id) - # Assert assert len(results) == 1 assert results[0]["saved"] == 1 saved_reviews = mock_save_reviews.call_args[0][2] @@ -68,7 +65,6 @@ def test_poll_reviews_skips_existing_reviews( mock_get_existing_ids, mock_save_reviews, dummy_app, - create_default_sources_and_metrics, ): mock_get_app.return_value = dummy_app mock_get_existing_ids.return_value = ["123"] # 🔁 ja existeix @@ -104,7 +100,6 @@ def test_poll_reviews_handles_failed_analysis_services_gracefully( mock_get_existing_ids, mock_save_reviews, dummy_app, - create_default_sources_and_metrics, ): mock_get_app.return_value = dummy_app mock_get_existing_ids.return_value = [] diff --git a/polling/tests/test_retrieve_polling.py b/polling/tests/test_retrieve_polling.py index d4f560c..625a9ef 100644 --- a/polling/tests/test_retrieve_polling.py +++ b/polling/tests/test_retrieve_polling.py @@ -3,9 +3,7 @@ @pytest.mark.django_db -def test_retrieve_polling_schedule_success( - create_polling_schedule, create_default_sources_and_metrics -): +def test_retrieve_polling_schedule_success(create_polling_schedule): client = APIClient() schedule = create_polling_schedule(poll_type="metrics", interval_hours=6) diff --git a/polling/tests/test_review_analysis.py b/polling/tests/test_review_analysis.py index 7da3941..cd88c02 100644 --- a/polling/tests/test_review_analysis.py +++ b/polling/tests/test_review_analysis.py @@ -3,6 +3,7 @@ import pytest from polling.services.polling_execution_service import PollingExecutionService +from review.models import ReviewPolarity, ReviewType @pytest.fixture @@ -15,14 +16,12 @@ def dummy_reviews(): @pytest.mark.django_db @patch("polling.services.polling_execution_service.requests.post") -def test_analyze_review_polarity_success( - mock_post, dummy_reviews, create_default_sources_and_metrics -): +def 
test_analyze_review_polarity_success(mock_post, dummy_reviews): mock_post.return_value.status_code = 200 mock_post.return_value.json.return_value = { "reviews": [ - {"reviewId": "1", "polarity": "positive"}, - {"reviewId": "2", "polarity": "negative"}, + {"reviewId": "1", "polarity": ReviewPolarity.POSITIVE}, + {"reviewId": "2", "polarity": ReviewPolarity.NEGATIVE}, ] } @@ -30,18 +29,18 @@ def test_analyze_review_polarity_success( result = service._analyze_review_polarity(dummy_reviews) assert len(result["reviews"]) == 2 - assert result["reviews"][0]["polarity"] == "positive" + assert result["reviews"][0]["polarity"] == ReviewPolarity.POSITIVE assert mock_post.called @pytest.mark.django_db @patch("polling.services.polling_execution_service.requests.post") -def test_analyze_review_type_success(mock_post, dummy_reviews, create_default_sources_and_metrics): +def test_analyze_review_type_success(mock_post, dummy_reviews): mock_post.return_value.status_code = 200 mock_post.return_value.json.return_value = { "reviews": [ - {"reviewId": "1", "type": "feature"}, - {"reviewId": "2", "type": "bug"}, + {"reviewId": "1", "type": ReviewType.FEATURE}, + {"reviewId": "2", "type": ReviewType.BUG}, ] } @@ -49,15 +48,13 @@ def test_analyze_review_type_success(mock_post, dummy_reviews, create_default_so result = service._analyze_review_type(dummy_reviews) assert len(result["reviews"]) == 2 - assert result["reviews"][1]["type"] == "bug" + assert result["reviews"][1]["type"] == ReviewType.BUG assert mock_post.called @pytest.mark.django_db @patch("polling.services.polling_execution_service.requests.post", side_effect=Exception("Timeout")) -def test_analyze_review_handles_exception( - mock_post, dummy_reviews, create_default_sources_and_metrics -): +def test_analyze_review_handles_exception(mock_post, dummy_reviews): service = PollingExecutionService() result = service._analyze_review_polarity(dummy_reviews) diff --git a/polling/urls.py b/polling/urls.py index 5cc5e44..74039dc 100644 --- 
a/polling/urls.py +++ b/polling/urls.py @@ -5,9 +5,7 @@ urlpatterns = [ path( "apps//polling/", - PollingViewSet.as_view( - {"get": "retrieve_polling", "post": "activate_polling", "delete": "deactivate_polling"} - ), + PollingViewSet.as_view({"get": "retrieve", "post": "activate", "delete": "deactivate"}), name="manage-polling", ), path( diff --git a/polling/views.py b/polling/views.py index 4d724f5..d6d47c4 100644 --- a/polling/views.py +++ b/polling/views.py @@ -40,7 +40,8 @@ class PollingViewSet(ViewSet): tags=["Polling"], methods=["GET"], ) - def retrieve_polling(self, request, id=None): + def retrieve(self, request, id: int): + user_authorized_app_ids = request.user.apps.values_list("id", flat=True) poll_type = request.query_params.get("poll_type") if poll_type not in ["metrics", "reviews"]: return Response( @@ -48,7 +49,7 @@ def retrieve_polling(self, request, id=None): status=status.HTTP_400_BAD_REQUEST, ) - schedule = self.service.get_polling_schedule(id, poll_type) + schedule = self.service.get_polling_schedule(id, poll_type, user_authorized_app_ids) serializer = PollingScheduleSerializer(schedule) return Response(serializer.data, status=status.HTTP_200_OK) @@ -86,8 +87,9 @@ def retrieve_polling(self, request, id=None): tags=["Polling"], methods=["POST"], ) - def activate_polling(self, request, id=None): + def activate(self, request, id: int): try: + user_authorized_app_ids = request.user.apps.values_list("id", flat=True) poll_type = request.query_params.get("poll_type") if poll_type not in ["metrics", "reviews"]: return Response( @@ -95,7 +97,9 @@ def activate_polling(self, request, id=None): status=status.HTTP_400_BAD_REQUEST, ) interval_hours = request.query_params.get("interval_hours") - polling_schedule = self.service.get_polling_schedule(id, poll_type) + polling_schedule = self.service.get_polling_schedule( + id, poll_type, user_authorized_app_ids + ) if ( interval_hours is not None @@ -148,15 +152,18 @@ def activate_polling(self, request, id=None): 
tags=["Polling"], methods=["DELETE"], ) - def deactivate_polling(self, request, id=None): + def deactivate(self, request, id: int): try: + user_authorized_app_ids = request.user.apps.values_list("id", flat=True) poll_type = request.query_params.get("poll_type") if poll_type not in ["metrics", "reviews"]: return Response( {"detail": "Invalid or missing poll_type. Must be 'metrics' or 'reviews'."}, status=status.HTTP_400_BAD_REQUEST, ) - polling_schedule = self.service.get_polling_schedule(id, poll_type) + polling_schedule = self.service.get_polling_schedule( + id, poll_type, user_authorized_app_ids + ) updated_polling_schedule = self.service.deactivate_polling_schedule(polling_schedule) return Response( PollingScheduleSerializer(updated_polling_schedule).data, status=status.HTTP_200_OK @@ -201,8 +208,14 @@ def deactivate_polling(self, request, id=None): tags=["Polling"], methods=["POST"], ) - def manual_review_polling(self, request, id=None): + def manual_review_polling(self, request, id: int): + user_authorized_app_ids = request.user.apps.values_list("id", flat=True) date_from = request.query_params.get("date_from", None) date_to = request.query_params.get("date_to", None) - self.service.poll_reviews(app_id=id, date_from=date_from, date_to=date_to) + self.service.manual_poll_reviews( + app_id=id, + date_from=date_from, + date_to=date_to, + authorized_app_ids=user_authorized_app_ids, + ) return Response({"detail": "Polling triggered successfully."}, status=200) diff --git a/requirements.txt b/requirements.txt index 3d8bdef..b18f3f8 100644 Binary files a/requirements.txt and b/requirements.txt differ diff --git a/review/migrations/0002_alter_review_polarity.py b/review/migrations/0002_alter_review_polarity.py new file mode 100644 index 0000000..14d9e73 --- /dev/null +++ b/review/migrations/0002_alter_review_polarity.py @@ -0,0 +1,20 @@ +# Generated by Django 5.1.7 on 2025-06-19 11:12 + +from django.db import migrations, models + + +class 
Migration(migrations.Migration): + + dependencies = [ + ("review", "0001_initial"), + ] + + operations = [ + migrations.AlterField( + model_name="review", + name="polarity", + field=models.CharField( + choices=[("positive", "Positive"), ("negative", "Negative")], max_length=10 + ), + ), + ] diff --git a/review/models.py b/review/models.py index 908383d..0548ce4 100644 --- a/review/models.py +++ b/review/models.py @@ -7,15 +7,14 @@ class ReviewPolarity(models.TextChoices): POSITIVE = "positive", "Positive" - NEUTRAL = "neutral", "Neutral" NEGATIVE = "negative", "Negative" class ReviewType(models.TextChoices): - BUG = "bug", "Bug" - RATING = "rating", "Rating" - FEATURE = "feature", "Feature" - USER_EXPERIENCE = "user_experience", "User Experience" + BUG = "Bug", "Bug" + RATING = "Rating", "Rating" + FEATURE = "Feature", "Feature" + USER_EXPERIENCE = "UserExperience", "User Experience" class Review(models.Model): diff --git a/review/repositories.py b/review/repositories.py index 1185ad5..471e31d 100644 --- a/review/repositories.py +++ b/review/repositories.py @@ -22,13 +22,21 @@ def get_all(self, filters=None): if date_from: try: - queryset = queryset.filter(date__gte=datetime.fromisoformat(date_from)) + queryset = queryset.filter( + date__gte=timezone.make_aware( + datetime.strptime(filters["date_from"], "%Y-%m-%d") + ) + ) except ValueError: pass if date_to: try: - queryset = queryset.filter(date__lte=datetime.fromisoformat(date_to)) + queryset = queryset.filter( + date__lte=timezone.make_aware( + datetime.strptime(filters["date_to"], "%Y-%m-%d") + ) + ) except ValueError: pass diff --git a/review/tests/test_list_reviews.py b/review/tests/test_list_reviews.py index 0b2df25..65591df 100644 --- a/review/tests/test_list_reviews.py +++ b/review/tests/test_list_reviews.py @@ -1,4 +1,7 @@ +import datetime + import pytest +from django.utils import timezone from rest_framework import status from rest_framework.test import APIClient @@ -45,3 +48,64 @@ def 
test_list_reviews_only_returns_user_values(self, dummy_user, dummy_review, d reviews = Review.objects.filter(app__user=dummy_user) expected_data = ReviewSerializer(reviews, many=True).data assert response.json() == expected_data + + def test_list_reviews_filter_by_app(self, dummy_user, dummy_review): + client = APIClient() + client.force_authenticate(user=dummy_user) + + # Afegim una review addicional d'una altra app del mateix usuari + Review.objects.create( + review_id="dummy_review_2", + app=dummy_review.app, + source=dummy_review.source, + author="Another Author", + content="Another review", + rating=3.0, + date="2023-10-05T00:00:00Z", + ) + + response = client.get(f"/api/reviews/?app={dummy_review.app.id}") + + assert response.status_code == status.HTTP_200_OK + reviews = Review.objects.filter(app=dummy_review.app) + expected_data = ReviewSerializer(reviews, many=True).data + assert response.json() == expected_data + + def test_list_reviews_filter_by_date_range(self, dummy_user, dummy_review): + client = APIClient() + client.force_authenticate(user=dummy_user) + + # Review dins del rang + Review.objects.create( + review_id="review_in_range", + app=dummy_review.app, + source=dummy_review.source, + author="Author1", + content="In range review", + rating=4.0, + date=timezone.make_aware(datetime.datetime(2023, 10, 15, 0, 0, 0)), + ) + + # Review fora del rang + Review.objects.create( + review_id="review_out_range", + app=dummy_review.app, + source=dummy_review.source, + author="Author2", + content="Out of range", + rating=2.0, + date=timezone.make_aware(datetime.datetime(2023, 5, 1, 0, 0, 0)), + ) + + response = client.get("/api/reviews/?date_from=2023-10-01&date_to=2023-10-31") + assert response.status_code == status.HTTP_200_OK + + # Definim els límits de data amb zona horària + date_from = timezone.make_aware(datetime.datetime(2023, 10, 1, 0, 0, 0)) + date_to = timezone.make_aware(datetime.datetime(2023, 10, 31, 23, 59, 59)) + + reviews = Review.objects.filter( 
+ app__user=dummy_user, date__gte=date_from, date__lte=date_to + ) + expected_data = ReviewSerializer(reviews, many=True).data + assert response.json() == expected_data diff --git a/review/tests/test_models.py b/review/tests/test_models.py index c87ad06..e63fb72 100644 --- a/review/tests/test_models.py +++ b/review/tests/test_models.py @@ -51,7 +51,7 @@ def test_create_review_with_values(): content="Content of the test review", date=now, rating=5, - polarity=ReviewPolarity.NEUTRAL, + polarity=ReviewPolarity.POSITIVE, type=ReviewType.FEATURE, ) assert r.pk is not None @@ -60,5 +60,5 @@ def test_create_review_with_values(): assert r.content == "Content of the test review" assert r.date == now assert r.rating == 5 - assert r.polarity == ReviewPolarity.NEUTRAL + assert r.polarity == ReviewPolarity.POSITIVE assert r.type == ReviewType.FEATURE diff --git a/review/views.py b/review/views.py index 517fac4..cca2ecd 100644 --- a/review/views.py +++ b/review/views.py @@ -1,6 +1,5 @@ -from drf_spectacular.utils import OpenApiParameter, OpenApiResponse, extend_schema -from rest_framework import status, viewsets -from rest_framework.decorators import action +from drf_spectacular.utils import OpenApiParameter, extend_schema +from rest_framework import viewsets from rest_framework.response import Response from review.services import ReviewService @@ -37,16 +36,3 @@ def list(self, request): reviews = self.service.list_reviews(filters) serializer = ReviewSerializer(reviews, many=True) return Response(serializer.data) - - # ELIMINAR - @extend_schema( - summary="Delete all reviews", - description="Deletes all reviews stored in the system. 
This action is irreversible.", - methods=["delete"], - responses={204: OpenApiResponse(description="All reviews have been deleted")}, - tags=["Reviews"], - ) - @action(detail=False, methods=["delete"], url_path="delete-all") - def delete_all(self, request): - self.service.delete_all_reviews() - return Response(status=status.HTTP_204_NO_CONTENT) diff --git a/source/adapters/base.py b/source/adapters/base.py index 02cfa2b..6cf2453 100644 --- a/source/adapters/base.py +++ b/source/adapters/base.py @@ -17,8 +17,8 @@ def __init__(self): if not hasattr(self, "code") or not self.code: raise NotImplementedError("Adapter must define 'code' and load source config.") - @abstractmethod - def supports_metric(self, metric: str) -> bool: ... + def supports_metric(self, metric: str) -> bool: + return metric in self.supported_metrics @abstractmethod def fetch(self, app, metrics: list[str]) -> dict[str, str]: ... diff --git a/source/adapters/google_play_scraper.py b/source/adapters/google_play_scraper.py index 165cdf1..80944f4 100644 --- a/source/adapters/google_play_scraper.py +++ b/source/adapters/google_play_scraper.py @@ -22,11 +22,7 @@ def __init__(self): self.url = source_data["url"] self.supported_metrics = source_data["supported_metrics"] - def supports_metric(self, metric: str): - return metric in self.supported_metrics - - @staticmethod - def lookup_app(package_name: str) -> dict | None: + def lookup_app(self, package_name: str) -> dict | None: try: result = gp_app(package_name, lang="en", country="uk") return result diff --git a/source/adapters/itunes.py b/source/adapters/itunes.py index 1844833..6622fec 100644 --- a/source/adapters/itunes.py +++ b/source/adapters/itunes.py @@ -17,9 +17,6 @@ def __init__(self): self.url = source_data["url"] self.supported_metrics = source_data["supported_metrics"] - def supports_metric(self, metric: str): - return metric in self.supported_metrics - def lookup_app(self, appstore_id: str): response = 
requests.get(f"{self.url}/lookup?id={appstore_id}") if not response.ok: diff --git a/source/adapters/news.py b/source/adapters/news.py index a0a95e4..f011990 100644 --- a/source/adapters/news.py +++ b/source/adapters/news.py @@ -21,9 +21,6 @@ def __init__(self, api_key=None): self.supported_metrics = source_data["supported_metrics"] self.api_key = api_key or os.environ.get("NEWSAPI_KEY") - def supports_metric(self, metric: str) -> bool: - return metric in self.supported_metrics - def fetch(self, app, metrics: list[str]): if not app: return {} diff --git a/source/adapters/reddit.py b/source/adapters/reddit.py index 258f242..535b04f 100644 --- a/source/adapters/reddit.py +++ b/source/adapters/reddit.py @@ -32,9 +32,6 @@ def __init__(self): except praw.exceptions.MissingRequiredAttributeException as e: raise APIException(f"Reddit adapter configuration error: {str(e)}") - def supports_metric(self, metric: str) -> bool: - return metric in self.supported_metrics - def fetch(self, app, metrics: list[str]): if not app: return {} diff --git a/source/constants/source_type.py b/source/constants/source_type.py index acfa338..0434e29 100644 --- a/source/constants/source_type.py +++ b/source/constants/source_type.py @@ -4,4 +4,3 @@ class SourceType(models.TextChoices): API = "api", "API" SCRAPER = "scraper", "Scraper" - EXTERNAL_TOOL = "external_tool", "External Tool" diff --git a/source/repositories.py b/source/repositories.py index 61bae87..530d8f6 100644 --- a/source/repositories.py +++ b/source/repositories.py @@ -3,41 +3,40 @@ class SourceRepository: - @staticmethod - def get_all(): + def get_all(self): return Source.objects.all() - @staticmethod - def get_by_id(source_id): + def get_by_id(self, source_id): return Source.objects.get(id=source_id) - @staticmethod - def create(data): - return Source.objects.create(**data) + def create(self, data): + metrics = data.pop("metrics", []) + source = Source.objects.create(**data) + if metrics: + source.metrics.set(metrics) + return 
source - @staticmethod - def update(instance, data): + def update(self, instance, data): + metrics = data.pop("metrics", None) for attr, value in data.items(): setattr(instance, attr, value) instance.save() + if metrics is not None: + instance.metrics.set(metrics) return instance - @staticmethod - def delete(instance): + def delete(self, instance): instance.delete() - @staticmethod - def add_metrics(instance, metrics_ids): + def add_metrics(self, instance, metrics_ids): instance.metrics.add(*metrics_ids) instance.save() return instance - @staticmethod - def remove_metrics(instance, metrics_ids): + def remove_metrics(self, instance, metrics_ids): instance.metrics.remove(*metrics_ids) instance.save() return instance - @staticmethod - def get_by_code(code: str): + def get_by_code(self, code: str): return Source.objects.prefetch_related("metrics").get(code=code) diff --git a/source/serializers.py b/source/serializers.py index 10814a2..309792b 100644 --- a/source/serializers.py +++ b/source/serializers.py @@ -1,5 +1,7 @@ from rest_framework import serializers +from metric.models import Metric + from .constants.source_type import SourceType from .models import Source @@ -10,10 +12,12 @@ class SourceSerializer(serializers.ModelSerializer): help_text=( "Type of the source. Available options:\n" "- 'api': Data source accessible through a public API.\n" - "- 'scraper': Data source obtained via web scraping.\n" - "- 'external_tool': Data provided by an external tool." + "- 'scraper': Data source obtained via web scraping." ), ) + metrics = serializers.PrimaryKeyRelatedField( + queryset=Metric.objects.all(), many=True, required=False + ) class Meta: model = Source @@ -25,10 +29,3 @@ class Meta: "url", "metrics", ] - read_only_fields = ["metrics"] - - -class LinkSourceSerializer(serializers.Serializer): - sources = serializers.ListField( - child=serializers.IntegerField(), help_text="List of source IDs to link to the metric." 
- ) diff --git a/source/services.py b/source/services.py index db2004f..0f83f45 100644 --- a/source/services.py +++ b/source/services.py @@ -28,12 +28,6 @@ def update_source(self, instance, validated_data): def delete_source(self, instance): return self.repo.delete(instance) - def add_metrics(self, instance, metrics_ids): - return self.repo.add_metrics(instance, metrics_ids) - - def remove_metrics(self, instance, metrics_ids): - return self.repo.remove_metrics(instance, metrics_ids) - def get_source_data(self, code: str) -> dict: try: source = self.repo.get_by_code(code) diff --git a/source/tests/test_adapter_fetch_behavior.py b/source/tests/test_adapter_fetch_behavior.py index f3288c8..8ffbf02 100644 --- a/source/tests/test_adapter_fetch_behavior.py +++ b/source/tests/test_adapter_fetch_behavior.py @@ -5,7 +5,6 @@ @pytest.mark.django_db -@pytest.mark.usefixtures("create_default_sources_and_metrics") def test_adapter_fetch_returns_dict(dummy_app): adapters = SourceService.load_sources() for adapter in adapters: diff --git a/source/tests/test_adapters_contract.py b/source/tests/test_adapters_contract.py index cf3dd79..1f0565f 100644 --- a/source/tests/test_adapters_contract.py +++ b/source/tests/test_adapters_contract.py @@ -4,7 +4,6 @@ @pytest.mark.django_db -@pytest.mark.usefixtures("create_default_sources_and_metrics") def test_all_adapters_have_required_methods(): adapters = SourceService.load_sources() for adapter in adapters: diff --git a/source/tests/test_adapters_registry.py b/source/tests/test_adapters_registry.py index 6258351..a3515f0 100644 --- a/source/tests/test_adapters_registry.py +++ b/source/tests/test_adapters_registry.py @@ -5,7 +5,6 @@ @pytest.mark.django_db -@pytest.mark.usefixtures("create_default_sources_and_metrics") def test_adapter_codes_are_unique(): adapters = SourceService.load_sources() codes = [adapter.code for adapter in adapters] @@ -13,8 +12,7 @@ def test_adapter_codes_are_unique(): @pytest.mark.django_db 
-@pytest.mark.usefixtures("create_default_sources_and_metrics") -def test_each_adapter_has_a_matching_source_in_db(create_default_sources_and_metrics): +def test_each_adapter_has_a_matching_source_in_db(): adapters = SourceService.load_sources() for adapter in adapters: assert Source.objects.filter( diff --git a/source/tests/test_all_adapters.py b/source/tests/test_all_adapters.py index 3610b30..22f732b 100644 --- a/source/tests/test_all_adapters.py +++ b/source/tests/test_all_adapters.py @@ -14,9 +14,7 @@ @pytest.mark.django_db @pytest.mark.parametrize("adapter_class", SourceAdapter.__subclasses__()) -def test_fetch_returns_dict_of_strings( - adapter_class, dummy_app, create_default_sources_and_metrics -): +def test_fetch_returns_dict_of_strings(adapter_class, dummy_app): adapter = adapter_class() if not adapter.supported_metrics: diff --git a/source/tests/test_create_sources.py b/source/tests/test_create_sources.py index 3e81c13..7fca0c6 100644 --- a/source/tests/test_create_sources.py +++ b/source/tests/test_create_sources.py @@ -68,3 +68,38 @@ def test_create_source_missing_fields(self, dummy_superuser): assert ( "name" in response.json()["errors"][0].lower() or "name" in str(response.json()).lower() ) + + def test_create_source_with_metric(self, dummy_superuser, dummy_metric): + client = APIClient() + client.force_authenticate(user=dummy_superuser) + + payload = { + "code": "test_source", + "name": "Test Source", + "type": SourceType.API, + "url": "https://example.com", + "metrics": [dummy_metric.id], + } + + response = client.post("/api/sources/", data=payload, format="json") + + assert response.status_code == status.HTTP_201_CREATED + assert Source.objects.filter(name="Test Source").exists() + + def test_create_source_with_nonexistent_metric(self, dummy_superuser): + client = APIClient() + client.force_authenticate(user=dummy_superuser) + + payload = { + "code": "test_source", + "name": "Test Source", + "type": SourceType.API, + "url": "https://example.com", 
+ "metrics": [9999], + } + + response = client.post("/api/sources/", data=payload, format="json") + + assert response.status_code == status.HTTP_400_BAD_REQUEST + assert ("errors") in response.data + assert "Invalid pk" in response.data["errors"][0] diff --git a/source/tests/test_google_play_scraper.py b/source/tests/test_google_play_scraper.py index e099dcd..7fabedf 100644 --- a/source/tests/test_google_play_scraper.py +++ b/source/tests/test_google_play_scraper.py @@ -9,7 +9,6 @@ @pytest.mark.django_db -@pytest.mark.usefixtures("create_default_sources_and_metrics") class TestGooglePlayScraperFetch: @patch("source.adapters.google_play_scraper.gp_app") def test_fetch_supported_metrics(self, mock_gp_app, dummy_app): @@ -30,7 +29,6 @@ def test_fetch_supported_metrics(self, mock_gp_app, dummy_app): MetricCode.AVERAGE_RATING, MetricCode.TOTAL_REVIEWS, MetricCode.TOTAL_DOWNLOADS, - MetricCode.LAST_UPDATE_DATE, ], ) @@ -38,7 +36,6 @@ def test_fetch_supported_metrics(self, mock_gp_app, dummy_app): MetricCode.AVERAGE_RATING: "4.6", MetricCode.TOTAL_REVIEWS: "50000", MetricCode.TOTAL_DOWNLOADS: "1000000", - MetricCode.LAST_UPDATE_DATE: "2024-12-10", } @patch("source.adapters.google_play_scraper.gp_app") @@ -53,7 +50,6 @@ def test_fetch_unsupported_metric_returns_empty(self, mock_gp_app, dummy_app): @pytest.mark.django_db -@pytest.mark.usefixtures("create_default_sources_and_metrics") class TestGooglePlayScraperFetchReviews: @patch("source.adapters.google_play_scraper.reviews") def test_fetch_reviews_within_range(self, mock_reviews, dummy_app): diff --git a/source/tests/test_itunes_adapter.py b/source/tests/test_itunes_adapter.py index b868d22..1cf564c 100644 --- a/source/tests/test_itunes_adapter.py +++ b/source/tests/test_itunes_adapter.py @@ -7,7 +7,6 @@ @pytest.mark.django_db -@pytest.mark.usefixtures("create_default_sources_and_metrics") class TestItunesSearchAPIAdapter: @patch("source.adapters.itunes.requests.get") def test_fetch_supported_metrics(self, 
mock_requests_get, dummy_app): diff --git a/source/tests/test_load_sources.py b/source/tests/test_load_sources.py index 6e5950e..734053b 100644 --- a/source/tests/test_load_sources.py +++ b/source/tests/test_load_sources.py @@ -4,7 +4,7 @@ @pytest.mark.django_db -@pytest.mark.usefixtures("create_default_sources_and_metrics") +@pytest.mark.usefixtures() def test_load_sources_returns_valid_adapters(): adapters = SourceService.load_sources() assert adapters, "No adapters loaded" diff --git a/source/tests/test_news.py b/source/tests/test_news.py index 3ca307a..4b6c93e 100644 --- a/source/tests/test_news.py +++ b/source/tests/test_news.py @@ -7,7 +7,6 @@ @pytest.mark.django_db -@pytest.mark.usefixtures("create_default_sources_and_metrics") class TestNewsAPIAdapter: @patch("source.adapters.news.requests.get") def test_fetch_supported_metric(self, mock_requests_get, dummy_app): diff --git a/source/tests/test_reddit.py b/source/tests/test_reddit.py index 8a96007..a7395c5 100644 --- a/source/tests/test_reddit.py +++ b/source/tests/test_reddit.py @@ -7,7 +7,6 @@ @pytest.mark.django_db -@pytest.mark.usefixtures("create_default_sources_and_metrics") class TestRedditAPIAdapter: @patch("praw.Reddit") def test_fetch_supported_metric(self, mock_reddit_class, dummy_app): diff --git a/source/tests/test_update_sources.py b/source/tests/test_update_sources.py index ea1e7b5..6f454ca 100644 --- a/source/tests/test_update_sources.py +++ b/source/tests/test_update_sources.py @@ -59,3 +59,38 @@ def test_update_source_invalid_data(self, dummy_source, dummy_superuser): response = client.put(f"/api/sources/{dummy_source.id}/", data=payload, format="json") assert response.status_code == status.HTTP_400_BAD_REQUEST assert "name" in str(response.json()).lower() + + def test_update_source_metric(self, dummy_source, dummy_superuser, dummy_metric): + client = APIClient() + client.force_authenticate(user=dummy_superuser) + + payload = { + "code": dummy_source.code, + "name": "Updated Name", + 
"type": SourceType.SCRAPER, + "url": "https://exampleupdated.com", + "metrics": [dummy_metric.id], + } + + response = client.put(f"/api/sources/{dummy_source.id}/", data=payload, format="json") + assert response.status_code == status.HTTP_200_OK, f"Errors: {response.json()}" + + assert response.json()["name"] == "Updated Name" + assert response.json()["url"] == "https://exampleupdated.com" + + def test_update_source_nonexistent_metric(self, dummy_source, dummy_superuser): + client = APIClient() + client.force_authenticate(user=dummy_superuser) + + payload = { + "code": dummy_source.code, + "name": "Updated Name", + "type": SourceType.SCRAPER, + "url": "https://exampleupdated.com", + "metrics": [9999], + } + + response = client.put(f"/api/sources/{dummy_source.id}/", data=payload, format="json") + assert response.status_code == status.HTTP_400_BAD_REQUEST + assert ("errors") in response.data + assert "Invalid pk" in response.data["errors"][0] diff --git a/source/views.py b/source/views.py index 620772f..cc46b20 100644 --- a/source/views.py +++ b/source/views.py @@ -1,9 +1,7 @@ from drf_spectacular.utils import OpenApiParameter, OpenApiResponse, extend_schema from rest_framework import status, viewsets -from rest_framework.decorators import action from rest_framework.response import Response -from metric.serializers import LinkMetricsSerializer from permissions.decorators import superuser_required from schemas.responses import ( BAD_REQUEST_RESPONSE, @@ -119,56 +117,3 @@ def destroy(self, request, pk=None): source = self.service.get_source(pk) self.service.delete_source(source) return Response(status=status.HTTP_204_NO_CONTENT) - - @extend_schema( - summary="Add metrics to a source", - description=( - "Adds a list of metric IDs to the given source," - " without removing existing associations.\n\n" - "The input must include a `metrics` field with a list of IDs." 
- ), - request=LinkMetricsSerializer, - parameters=[OpenApiParameter(name="id", required=True, type=int, location="path")], - responses=SourceSerializer, - tags=["Sources"], - ) - @superuser_required - @action(detail=True, methods=["post"], url_path="metrics") - def add_metrics(self, request, pk=None): - metrics_ids = request.data.get("metrics", []) - - if not isinstance(metrics_ids, list): - return Response( - {"error": "metrics must be a list of IDs"}, status=status.HTTP_400_BAD_REQUEST - ) - - source = self.service.get_source(pk) - source = self.service.add_metrics(source, metrics_ids) - - return Response(SourceSerializer(source).data, status=status.HTTP_200_OK) - - @extend_schema( - summary="Remove metrics from a source", - description=( - "Removes the specified metric IDs from the given source.\n\n" - "The input must include a `metrics` field with a list of IDs." - ), - request=LinkMetricsSerializer, - parameters=[OpenApiParameter(name="id", required=True, type=int, location="path")], - responses=SourceSerializer, - tags=["Sources"], - ) - @superuser_required - @action(detail=True, methods=["delete"], url_path="metrics") - def remove_metrics(self, request, pk=None): - metrics_ids = request.data.get("metrics", []) - - if not isinstance(metrics_ids, list): - return Response( - {"error": "metrics must be a list of IDs"}, status=status.HTTP_400_BAD_REQUEST - ) - - source = self.service.get_source(pk) - source = self.service.remove_metrics(source, metrics_ids) - - return Response(SourceSerializer(source).data, status=status.HTTP_200_OK) diff --git a/users/tests.py b/users/tests/__init__.py similarity index 100% rename from users/tests.py rename to users/tests/__init__.py diff --git a/users/tests/test_api_key.py b/users/tests/test_api_key.py new file mode 100644 index 0000000..563a481 --- /dev/null +++ b/users/tests/test_api_key.py @@ -0,0 +1,41 @@ +import pytest +from rest_framework import status +from rest_framework.test import APIClient + + 
+@pytest.mark.django_db +def test_generate_api_key_success(): + client = APIClient() + + # Registre + client.post( + "/api/users/register/", + data={ + "username": "apikeyuser", + "email": "apikeyuser@example.com", + "password": "securepassword123", + }, + format="json", + ) + + # Login + login_response = client.post( + "/api/users/token/", + data={ + "username": "apikeyuser", + "password": "securepassword123", + }, + format="json", + ) + + assert login_response.status_code == status.HTTP_200_OK + access_token = login_response.json()["access"] + + client.credentials(HTTP_AUTHORIZATION=f"Bearer {access_token}") + + # Crida a l’endpoint d’API Key + response = client.post("/api/users/token/api/", format="json") + + assert response.status_code == status.HTTP_200_OK # abans era 201 + assert "token" in response.json() + assert len(response.json()["token"]) > 10 diff --git a/users/tests/test_unauthorized.py b/users/tests/test_unauthorized.py new file mode 100644 index 0000000..2b2448f --- /dev/null +++ b/users/tests/test_unauthorized.py @@ -0,0 +1,10 @@ +import pytest +from rest_framework import status +from rest_framework.test import APIClient + + +@pytest.mark.django_db +def test_protected_endpoint_requires_authentication(): + client = APIClient() + response = client.get("/api/apps/") + assert response.status_code == status.HTTP_401_UNAUTHORIZED diff --git a/users/tests/test_user_login.py b/users/tests/test_user_login.py new file mode 100644 index 0000000..ec1250f --- /dev/null +++ b/users/tests/test_user_login.py @@ -0,0 +1,27 @@ +import pytest +from rest_framework import status +from rest_framework.test import APIClient + + +@pytest.mark.django_db +def test_login_user_success(): + client = APIClient() + + payload = { + "username": "newuser", + "email": "newuser@example.com", + "password": "securepassword123", + } + + client.post("/api/users/register/", data=payload, format="json") + + payload = { + "username": "newuser", + "password": "securepassword123", # El que 
uses al fixture + } + + response = client.post("/api/users/token/", data=payload, format="json") + + assert response.status_code == status.HTTP_200_OK + data = response.json() + assert "access" in data and "refresh" in data diff --git a/users/tests/test_user_register.py b/users/tests/test_user_register.py new file mode 100644 index 0000000..b1e85bb --- /dev/null +++ b/users/tests/test_user_register.py @@ -0,0 +1,20 @@ +import pytest +from rest_framework import status +from rest_framework.test import APIClient + + +@pytest.mark.django_db +def test_register_user_success(): + client = APIClient() + + payload = { + "username": "newuser", + "email": "newuser@example.com", + "password": "securepassword123", + } + + response = client.post("/api/users/register/", data=payload, format="json") + + assert response.status_code == status.HTTP_201_CREATED + data = response.json() + assert "access" in data and "refresh" in data diff --git a/users/tests/test_user_roles.py b/users/tests/test_user_roles.py new file mode 100644 index 0000000..2401696 --- /dev/null +++ b/users/tests/test_user_roles.py @@ -0,0 +1,44 @@ +import pytest +from rest_framework import status +from rest_framework.test import APIClient + + +@pytest.mark.django_db +class TestUserRoleDifferentiation: + def test_roles_are_differentiated(self, dummy_user, dummy_superuser): + assert not dummy_user.is_superuser + assert dummy_superuser.is_superuser + + def test_normal_user_has_limited_permissions(self, dummy_user): + client = APIClient() + client.force_authenticate(user=dummy_user) + + response = client.get("/api/apps/") + assert response.status_code == status.HTTP_200_OK + + payload = { + "code": "test_metric", + "name": "Test Metric", + "value_type": "integer", + "description": "A test metric.", + "is_derived": False, + } + response = client.post("/api/metrics/", data=payload, format="json") + assert response.status_code == status.HTTP_403_FORBIDDEN + + def test_superuser_has_extended_permissions(self, 
dummy_superuser): + client = APIClient() + client.force_authenticate(user=dummy_superuser) + + response = client.get("/api/apps/") + assert response.status_code == status.HTTP_200_OK + + payload = { + "code": "test_metric", + "name": "Test Metric", + "value_type": "integer", + "description": "A test metric.", + "is_derived": False, + } + response = client.post("/api/metrics/", data=payload, format="json") + assert response.status_code == status.HTTP_201_CREATED diff --git a/users/urls.py b/users/urls.py index 278e27f..257bbac 100644 --- a/users/urls.py +++ b/users/urls.py @@ -4,10 +4,11 @@ TokenRefreshView, ) -from .views import RegisterView +from .views import GenerateApiTokenView, RegisterView urlpatterns = [ path("register/", RegisterView.as_view(), name="user-register"), path("token/", TokenObtainPairView.as_view(), name="token_obtain_pair"), path("token/refresh/", TokenRefreshView.as_view(), name="token_refresh"), + path("token/api/", GenerateApiTokenView.as_view(), name="token_api"), ] diff --git a/users/views.py b/users/views.py index b644cf8..1f26947 100644 --- a/users/views.py +++ b/users/views.py @@ -1,6 +1,7 @@ from drf_spectacular.utils import extend_schema from rest_framework import status -from rest_framework.permissions import AllowAny +from rest_framework.authtoken.models import Token +from rest_framework.permissions import AllowAny, IsAuthenticated from rest_framework.response import Response from rest_framework.views import APIView @@ -37,3 +38,26 @@ def post(self, request): tokens = self.service.register_user(serializer.validated_data) return Response(tokens, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + + +class GenerateApiTokenView(APIView): + permission_classes = [IsAuthenticated] + + @extend_schema( + summary="Obtenir l'API Token", + description=( + "Retorna el token d’autenticació per accés programàtic (API Token).\n\n" + "Aquest token és persistent i no expira, a diferència dels 
tokens JWT." + ), + responses={ + 200: { + "type": "object", + "properties": { + "token": {"type": "string"}, + }, + }, + }, + ) + def post(self, request): + token, _ = Token.objects.get_or_create(user=request.user) + return Response({"token": token.key}, status=status.HTTP_200_OK)