diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 4d857c7b97a1..9f4b0fddb509 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -20,6 +20,7 @@ jobs: name: ${{ matrix.shard_name }}(py=${{ matrix.python-version }},dj=${{ matrix.django-version }},mongo=${{ matrix.mongo-version }}) runs-on: ${{ matrix.os-version }} strategy: + fail-fast: false matrix: python-version: - "3.11" diff --git a/lms/djangoapps/courseware/tests/test_submitting_problems.py b/lms/djangoapps/courseware/tests/test_submitting_problems.py index 6a49e3bccfda..b0c1543f110c 100644 --- a/lms/djangoapps/courseware/tests/test_submitting_problems.py +++ b/lms/djangoapps/courseware/tests/test_submitting_problems.py @@ -29,7 +29,6 @@ SchematicResponseXMLFactory ) from xmodule.capa.tests.test_util import UseUnsafeCodejail -from xmodule.capa.xqueue_interface import XQueueInterface from common.djangoapps.course_modes.models import CourseMode from lms.djangoapps.courseware.models import BaseStudentModuleHistory, StudentModule from lms.djangoapps.courseware.tests.helpers import LoginEnrollmentTestCase @@ -43,6 +42,11 @@ from xmodule.modulestore.tests.factories import CourseFactory, BlockFactory # lint-amnesty, pylint: disable=wrong-import-order from xmodule.partitions.partitions import Group, UserPartition # lint-amnesty, pylint: disable=wrong-import-order +if settings.USE_EXTRACTED_PROBLEM_BLOCK: + from xblocks_contrib.problem.capa.xqueue_interface import XQueueInterface +else: + from xmodule.capa.xqueue_interface import XQueueInterface + class ProblemSubmissionTestMixin(TestCase): """ diff --git a/lms/djangoapps/instructor_task/tests/test_integration.py b/lms/djangoapps/instructor_task/tests/test_integration.py index d7e6de32b3f4..f2208d8b4b39 100644 --- a/lms/djangoapps/instructor_task/tests/test_integration.py +++ b/lms/djangoapps/instructor_task/tests/test_integration.py @@ -16,6 +16,7 @@ import pytest import ddt from celery.states 
import FAILURE, SUCCESS +from django.conf import settings from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user from django.test.utils import override_settings from django.urls import reverse @@ -46,6 +47,8 @@ from xmodule.modulestore.tests.factories import BlockFactory # lint-amnesty, pylint: disable=wrong-import-order from common.test.utils import assert_dict_contains_subset +CAPA = "xblocks_contrib.problem" if settings.USE_EXTRACTED_PROBLEM_BLOCK else "xmodule" + log = logging.getLogger(__name__) @@ -276,7 +279,7 @@ def test_rescoring_failure(self): self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1]) expected_message = "bad things happened" - with patch('xmodule.capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore: + with patch(f'{CAPA}.capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore: mock_rescore.side_effect = ZeroDivisionError(expected_message) instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name) self._assert_task_failure( @@ -296,7 +299,7 @@ def test_rescoring_bad_unicode_input(self): # return an input error as if it were a numerical response, with an embedded unicode character: expected_message = "Could not interpret '2/3\u03a9' as a number" - with patch('xmodule.capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore: + with patch(f'{CAPA}.capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore: mock_rescore.side_effect = StudentInputError(expected_message) instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name) diff --git a/openedx/envs/common.py b/openedx/envs/common.py index c93ffd59b3b9..1f3aa4a58751 100644 --- a/openedx/envs/common.py +++ b/openedx/envs/common.py @@ -2095,7 +2095,7 @@ def add_optional_apps(optional_apps, installed_apps): # .. 
toggle_warning: Not production-ready until relevant subtask https://github.com/openedx/edx-platform/issues/34827 is done. # .. toggle_creation_date: 2024-11-10 # .. toggle_target_removal_date: 2025-06-01 -USE_EXTRACTED_PROBLEM_BLOCK = False +USE_EXTRACTED_PROBLEM_BLOCK = True # .. toggle_name: USE_EXTRACTED_VIDEO_BLOCK # .. toggle_default: False diff --git a/requirements/edx/base.txt b/requirements/edx/base.txt index 96a3d75845ce..55e8f6e7b073 100644 --- a/requirements/edx/base.txt +++ b/requirements/edx/base.txt @@ -1267,7 +1267,7 @@ xblock-utils==4.0.0 # via # edx-sga # xblock-poll -xblocks-contrib==0.10.2 +git+https://github.com/openedx/xblocks-contrib.git@fix-problemblock # via -r requirements/edx/bundled.in xmlsec==1.3.14 # via diff --git a/requirements/edx/development.txt b/requirements/edx/development.txt index 2f9dbe37e3d6..9f9d15576695 100644 --- a/requirements/edx/development.txt +++ b/requirements/edx/development.txt @@ -2299,7 +2299,7 @@ xblock-utils==4.0.0 # -r requirements/edx/testing.txt # edx-sga # xblock-poll -xblocks-contrib==0.10.2 +git+https://github.com/openedx/xblocks-contrib.git@fix-problemblock # via # -r requirements/edx/doc.txt # -r requirements/edx/testing.txt diff --git a/requirements/edx/doc.txt b/requirements/edx/doc.txt index 0414d0c49421..f470a392dd48 100644 --- a/requirements/edx/doc.txt +++ b/requirements/edx/doc.txt @@ -1609,7 +1609,7 @@ xblock-utils==4.0.0 # -r requirements/edx/base.txt # edx-sga # xblock-poll -xblocks-contrib==0.10.2 +git+https://github.com/openedx/xblocks-contrib.git@fix-problemblock # via -r requirements/edx/base.txt xmlsec==1.3.14 # via diff --git a/requirements/edx/testing.txt b/requirements/edx/testing.txt index b66725171b7f..7da842de173b 100644 --- a/requirements/edx/testing.txt +++ b/requirements/edx/testing.txt @@ -1699,7 +1699,7 @@ xblock-utils==4.0.0 # -r requirements/edx/base.txt # edx-sga # xblock-poll -xblocks-contrib==0.10.2 +git+https://github.com/openedx/xblocks-contrib.git@fix-problemblock # via 
-r requirements/edx/base.txt xmlsec==1.3.14 # via diff --git a/xmodule/tests/test_capa_block.py b/xmodule/tests/test_capa_block.py index 3a9bed6c73d5..5e40818f8c17 100644 --- a/xmodule/tests/test_capa_block.py +++ b/xmodule/tests/test_capa_block.py @@ -8,7 +8,6 @@ import os import random import textwrap -import unittest from unittest.mock import DEFAULT, Mock, PropertyMock, patch from zoneinfo import ZoneInfo @@ -17,7 +16,8 @@ import requests import webob from codejail.safe_exec import SafeExecException -from django.test import override_settings +from django.conf import settings +from django.test import TestCase, override_settings from django.utils.encoding import smart_str from lxml import etree from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator @@ -30,20 +30,24 @@ from lms.djangoapps.courseware.user_state_client import XBlockUserState from openedx.core.djangolib.testing.utils import skip_unless_lms from xmodule.capa import responsetypes -from xmodule.capa.correctmap import CorrectMap -from xmodule.capa.responsetypes import ( - LoncapaProblemError, - ResponseError, - StudentInputError, -) from xmodule.capa.tests.test_util import UseUnsafeCodejail -from xmodule.capa.xqueue_interface import XQueueInterface from xmodule.capa_block import ComplexEncoder, ProblemBlock from xmodule.tests import DATA_DIR from ..capa_block import RANDOMIZATION, SHOWANSWER from . 
import get_test_system +if settings.USE_EXTRACTED_PROBLEM_BLOCK: + CAPA = "xblocks_contrib.problem" + from xblocks_contrib.problem.capa.correctmap import CorrectMap + from xblocks_contrib.problem.capa.responsetypes import LoncapaProblemError, ResponseError, StudentInputError + from xblocks_contrib.problem.capa.xqueue_interface import XQueueInterface +else: + CAPA = "xmodule" + from xmodule.capa.correctmap import CorrectMap + from xmodule.capa.responsetypes import LoncapaProblemError, ResponseError, StudentInputError + from xmodule.capa.xqueue_interface import XQueueInterface + class CapaFactory: """ @@ -203,9 +207,11 @@ class CapaFactoryWithFiles(CapaFactory): @ddt.ddt @skip_unless_lms -class ProblemBlockTest(unittest.TestCase): # pylint: disable=too-many-public-methods +class _ProblemBlockTestBase(TestCase): # pylint: disable=too-many-public-methods """Tests for various problem types in XBlocks.""" + __test__ = False + def setUp(self): super().setUp() @@ -342,6 +348,8 @@ def test_showanswer_hide_correctness( """ Ensure that the answer will not be shown when correctness is being hidden. """ + problem_data = problem_data.copy() + if "due" in problem_data: problem_data["due"] = getattr(self, problem_data["due"]) problem = CapaFactory.create(**problem_data) @@ -688,6 +696,7 @@ def test_show_correctness_past_due(self, problem_data, expected_result): Test that with show_correctness="past_due", correctness will only be visible after the problem is closed for everyone--e.g. after due date + grace period. 
""" + problem_data = problem_data.copy() problem_data["due"] = getattr(self, problem_data["due"]) if "graceperiod" in problem_data: problem_data["graceperiod"] = getattr(self, problem_data["graceperiod"]) @@ -811,8 +820,8 @@ def test_submit_problem_correct(self): # Simulate that all answers are marked correct, no matter # what the input is, by patching CorrectMap.is_correct() # Also simulate rendering the HTML - with patch("xmodule.capa.correctmap.CorrectMap.is_correct") as mock_is_correct: - with patch("xmodule.capa_block.ProblemBlock.get_problem_html") as mock_html: + with patch(f"{CAPA}.capa.correctmap.CorrectMap.is_correct") as mock_is_correct: + with patch(f"{CAPA}.capa_block.ProblemBlock.get_problem_html") as mock_html: mock_is_correct.return_value = True mock_html.return_value = "Test HTML" @@ -831,8 +840,8 @@ def test_submit_problem_correct(self): # and that this was considered attempt number 2 for grading purposes assert block.lcp.context["attempt"] == 2 - @patch("xmodule.capa.correctmap.CorrectMap.is_correct") - @patch("xmodule.capa_block.ProblemBlock.get_problem_html") + @patch(f"{CAPA}.capa.correctmap.CorrectMap.is_correct") + @patch(f"{CAPA}.capa_block.ProblemBlock.get_problem_html") def test_submit_problem_with_grading_method_disable( self, mock_html: Mock, mock_is_correct: Mock ): @@ -873,8 +882,8 @@ def test_submit_problem_with_grading_method_disable( assert block.lcp.context["attempt"] == 3 assert block.score == Score(raw_earned=1, raw_possible=1) - @patch("xmodule.capa.correctmap.CorrectMap.is_correct") - @patch("xmodule.capa_block.ProblemBlock.get_problem_html") + @patch(f"{CAPA}.capa.correctmap.CorrectMap.is_correct") + @patch(f"{CAPA}.capa_block.ProblemBlock.get_problem_html") def test_submit_problem_with_grading_method_enable(self, mock_html: Mock, mock_is_correct: Mock): """ Test that the grading method is enabled when submit a problem. 
@@ -895,8 +904,8 @@ def test_submit_problem_with_grading_method_enable(self, mock_html: Mock, mock_i assert block.score == Score(raw_earned=1, raw_possible=1) mock_get_score.assert_called() - @patch("xmodule.capa.correctmap.CorrectMap.is_correct") - @patch("xmodule.capa_block.ProblemBlock.get_problem_html") + @patch(f"{CAPA}.capa.correctmap.CorrectMap.is_correct") + @patch(f"{CAPA}.capa_block.ProblemBlock.get_problem_html") def test_submit_problem_grading_method_always_enabled(self, mock_html: Mock, mock_is_correct: Mock): """ Test problem submission when grading method is always enabled by default. @@ -948,8 +957,8 @@ def test_submit_problem_grading_method_always_enabled(self, mock_html: Mock, moc assert block.lcp.context["attempt"] == 4 assert block.score == Score(raw_earned=1, raw_possible=1) - @patch("xmodule.capa.correctmap.CorrectMap.is_correct") - @patch("xmodule.capa_block.ProblemBlock.get_problem_html") + @patch(f"{CAPA}.capa.correctmap.CorrectMap.is_correct") + @patch(f"{CAPA}.capa_block.ProblemBlock.get_problem_html") def test_submit_problem_grading_method_always_enabled_highest_score(self, mock_html: Mock, mock_is_correct: Mock): """ Test problem submission when grading method is always enabled by default @@ -1001,8 +1010,8 @@ def test_submit_problem_grading_method_always_enabled_highest_score(self, mock_h assert block.lcp.context["attempt"] == 4 assert block.score == Score(raw_earned=1, raw_possible=1) - @patch("xmodule.capa.correctmap.CorrectMap.is_correct") - @patch("xmodule.capa_block.ProblemBlock.get_problem_html") + @patch(f"{CAPA}.capa.correctmap.CorrectMap.is_correct") + @patch(f"{CAPA}.capa_block.ProblemBlock.get_problem_html") def test_submit_problem_correct_last_score(self, mock_html: Mock, mock_is_correct: Mock): """ Test the `last_score` grading method. 
@@ -1034,8 +1043,8 @@ def test_submit_problem_correct_last_score(self, mock_html: Mock, mock_is_correc assert block.lcp.context["attempt"] == 2 assert block.score == Score(raw_earned=0, raw_possible=1) - @patch("xmodule.capa.correctmap.CorrectMap.is_correct") - @patch("xmodule.capa_block.ProblemBlock.get_problem_html") + @patch(f"{CAPA}.capa.correctmap.CorrectMap.is_correct") + @patch(f"{CAPA}.capa_block.ProblemBlock.get_problem_html") def test_submit_problem_correct_highest_score(self, mock_html: Mock, mock_is_cor """ Test the `highest_score` grading method. @@ -1066,7 +1075,7 @@ def test_submit_problem_correct_highest_score(self, mock_html: Mock, mock_is_cor assert block.lcp.context["attempt"] == 2 assert block.score == Score(raw_earned=1, raw_possible=1) - @patch("xmodule.capa.correctmap.CorrectMap.is_correct") - @patch("xmodule.capa_block.ProblemBlock.get_problem_html") + @patch(f"{CAPA}.capa.correctmap.CorrectMap.is_correct") + @patch(f"{CAPA}.capa_block.ProblemBlock.get_problem_html") def test_submit_problem_correct_first_score(self, mock_html: Mock, mock_is_corre """ @@ -1098,8 +1107,8 @@ def test_submit_problem_correct_first_score(self, mock_html: Mock, mock_is_corre assert block.lcp.context["attempt"] == 2 assert block.score == Score(raw_earned=0, raw_possible=1) - @patch("xmodule.capa.correctmap.CorrectMap.is_correct") - @patch("xmodule.capa_block.ProblemBlock.get_problem_html") + @patch(f"{CAPA}.capa.correctmap.CorrectMap.is_correct") + @patch(f"{CAPA}.capa_block.ProblemBlock.get_problem_html") def test_submit_problem_correct_average_score(self, mock_html: Mock, mock_is_correct: Mock): """ Test the `average_score` grading method. 
@@ -1156,7 +1165,7 @@ def test_submit_problem_incorrect(self): block = CapaFactory.create(attempts=0) # Simulate marking the input incorrect - with patch("xmodule.capa.correctmap.CorrectMap.is_correct") as mock_is_correct: + with patch(f"{CAPA}.capa.correctmap.CorrectMap.is_correct") as mock_is_correct: mock_is_correct.return_value = False # Check the problem @@ -1177,7 +1186,7 @@ def test_submit_problem_closed(self): # Problem closed -- cannot submit # Simulate that ProblemBlock.closed() always returns True - with patch("xmodule.capa_block.ProblemBlock.closed") as mock_closed: + with patch(f"{CAPA}.capa_block.ProblemBlock.closed") as mock_closed: mock_closed.return_value = True with pytest.raises(NotFoundError): get_request_dict = {CapaFactory.input_key(): "3.14"} @@ -1225,7 +1234,7 @@ def test_submit_problem_queued(self): # Simulate that the problem is queued multipatch = patch.multiple( - "xmodule.capa.capa_problem.LoncapaProblem", is_queued=DEFAULT, get_recentmost_queuetime=DEFAULT + f"{CAPA}.capa.capa_problem.LoncapaProblem", is_queued=DEFAULT, get_recentmost_queuetime=DEFAULT ) with multipatch as values: values["is_queued"].return_value = True @@ -1353,7 +1362,7 @@ def test_submit_problem_error(self): block = CapaFactory.create(attempts=1, user_is_staff=False) # Simulate answering a problem that raises the exception - with patch("xmodule.capa.capa_problem.LoncapaProblem.grade_answers") as mock_grade: + with patch(f"{CAPA}.capa.capa_problem.LoncapaProblem.grade_answers") as mock_grade: mock_grade.side_effect = exception_class("test error") get_request_dict = {CapaFactory.input_key(): "3.14"} @@ -1380,7 +1389,7 @@ def test_submit_problem_error_with_codejail_exception(self): block = CapaFactory.create(attempts=1, user_is_staff=False) # Simulate a codejail exception "Exception: Couldn't execute jailed code" - with patch("xmodule.capa.capa_problem.LoncapaProblem.grade_answers") as mock_grade: + with patch(f"{CAPA}.capa.capa_problem.LoncapaProblem.grade_answers") 
as mock_grade: try: raise ResponseError( "Couldn't execute jailed code: stdout: '', " @@ -1416,7 +1425,7 @@ def test_submit_problem_other_errors(self): block.runtime.is_author_mode = True # Simulate answering a problem that raises the exception - with patch("xmodule.capa.capa_problem.LoncapaProblem.grade_answers") as mock_grade: + with patch(f"{CAPA}.capa.capa_problem.LoncapaProblem.grade_answers") as mock_grade: error_msg = "Superterrible error happened: ☠" mock_grade.side_effect = Exception(error_msg) @@ -1450,7 +1459,7 @@ def test_submit_problem_error_nonascii(self): block = CapaFactory.create(attempts=1, user_is_staff=False) # Simulate answering a problem that raises the exception - with patch("xmodule.capa.capa_problem.LoncapaProblem.grade_answers") as mock_grade: + with patch(f"{CAPA}.capa.capa_problem.LoncapaProblem.grade_answers") as mock_grade: mock_grade.side_effect = exception_class("ȧƈƈḗƞŧḗḓ ŧḗẋŧ ƒǿř ŧḗşŧīƞɠ") get_request_dict = {CapaFactory.input_key(): "3.14"} @@ -1475,7 +1484,7 @@ def test_submit_problem_error_with_staff_user(self): block = CapaFactory.create(attempts=1, user_is_staff=True) # Simulate answering a problem that raises an exception - with patch("xmodule.capa.capa_problem.LoncapaProblem.grade_answers") as mock_grade: + with patch(f"{CAPA}.capa.capa_problem.LoncapaProblem.grade_answers") as mock_grade: mock_grade.side_effect = exception_class("test error") get_request_dict = {CapaFactory.input_key(): "3.14"} @@ -1506,7 +1515,7 @@ def test_handle_ajax_show_correctness(self, show_correctness, is_correct, expect block = CapaFactory.create(show_correctness=show_correctness, due=self.tomorrow_str, correct=is_correct) # Simulate marking the input correct/incorrect - with patch("xmodule.capa.correctmap.CorrectMap.is_correct") as mock_is_correct: + with patch(f"{CAPA}.capa.correctmap.CorrectMap.is_correct") as mock_is_correct: mock_is_correct.return_value = is_correct # Check the problem @@ -1529,7 +1538,7 @@ def test_reset_problem(self): 
block.choose_new_seed = Mock(wraps=block.choose_new_seed) # Stub out HTML rendering - with patch("xmodule.capa_block.ProblemBlock.get_problem_html") as mock_html: + with patch(f"{CAPA}.capa_block.ProblemBlock.get_problem_html") as mock_html: mock_html.return_value = "
Test HTML
" # Reset the problem @@ -1552,7 +1561,7 @@ def test_reset_problem_closed(self): block = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS) # Simulate that the problem is closed - with patch("xmodule.capa_block.ProblemBlock.closed") as mock_closed: + with patch(f"{CAPA}.capa_block.ProblemBlock.closed") as mock_closed: mock_closed.return_value = True # Try to reset the problem @@ -1581,13 +1590,13 @@ def test_rescore_problem_correct(self): # Simulate that all answers are marked correct, no matter # what the input is, by patching LoncapaResponse.evaluate_answers() - with patch("xmodule.capa.responsetypes.LoncapaResponse.evaluate_answers") as mock_evaluate_answers: + with patch(f"{CAPA}.capa.responsetypes.LoncapaResponse.evaluate_answers") as mock_evaluate_answers: mock_evaluate_answers.return_value = CorrectMap( answer_id=CapaFactory.answer_key(), correctness="correct", npoints=1, ) - with patch("xmodule.capa.correctmap.CorrectMap.is_correct") as mock_is_correct: + with patch(f"{CAPA}.capa.correctmap.CorrectMap.is_correct") as mock_is_correct: mock_is_correct.return_value = True # Check the problem @@ -1628,10 +1637,10 @@ def test_rescore_problem_additional_correct(self): # In case of rescore with only_if_higher=True it should update score of block # if previous score was lower - with patch("xmodule.capa.correctmap.CorrectMap.is_correct") as mock_is_correct: + with patch(f"{CAPA}.capa.correctmap.CorrectMap.is_correct") as mock_is_correct: mock_is_correct.return_value = True block.set_score(block.score_from_lcp(block.lcp)) - with patch("xmodule.capa.responsetypes.NumericalResponse.get_staff_ans") as get_staff_ans: + with patch(f"{CAPA}.capa.responsetypes.NumericalResponse.get_staff_ans") as get_staff_ans: get_staff_ans.return_value = 1 + 0j block.rescore(only_if_higher=True) @@ -1651,7 +1660,7 @@ def test_rescore_problem_incorrect(self): # Simulate that all answers are marked incorrect, no matter # what the input is, by patching LoncapaResponse.evaluate_answers() - 
with patch("xmodule.capa.responsetypes.LoncapaResponse.evaluate_answers") as mock_evaluate_answers: + with patch(f"{CAPA}.capa.responsetypes.LoncapaResponse.evaluate_answers") as mock_evaluate_answers: mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), "incorrect") block.rescore(only_if_higher=False) @@ -1691,7 +1700,7 @@ def test_rescore_problem_with_grading_method_enable(self): assert block.lcp.context["attempt"] == 1 mock_get_rescore.assert_called() - @patch("xmodule.capa_block.ProblemBlock.publish_grade") + @patch(f"{CAPA}.capa_block.ProblemBlock.publish_grade") def test_rescore_problem_grading_method_always_enabled(self, mock_publish_grade: Mock): """ Test the rescore method when grading method is always enabled by default. @@ -1735,7 +1744,7 @@ def test_rescore_problem_grading_method_always_enabled(self, mock_publish_grade: score=Score(raw_earned=0.33, raw_possible=1), only_if_higher=False ) - @patch("xmodule.capa_block.ProblemBlock.publish_grade") + @patch(f"{CAPA}.capa_block.ProblemBlock.publish_grade") def test_rescore_problem_grading_method_always_enabled_with_various_methods(self, mock_publish_grade: Mock): """ Test the rescore method when grading method is always enabled by default @@ -1777,7 +1786,7 @@ def test_rescore_problem_grading_method_always_enabled_with_various_methods(self block.rescore(only_if_higher=False) assert block.score == Score(raw_earned=1, raw_possible=1) - @patch("xmodule.capa_block.ProblemBlock.publish_grade") + @patch(f"{CAPA}.capa_block.ProblemBlock.publish_grade") def test_rescore_problem_update_grading_method(self, mock_publish_grade: Mock): """ Test the rescore method when the grading method is updated. 
@@ -1832,7 +1841,7 @@ def test_rescore_problem_not_supported(self): block = CapaFactory.create(done=True) # Try to rescore the problem, and get exception - with patch("xmodule.capa.capa_problem.LoncapaProblem.supports_rescoring") as mock_supports_rescoring: + with patch(f"{CAPA}.capa.capa_problem.LoncapaProblem.supports_rescoring") as mock_supports_rescoring: mock_supports_rescoring.return_value = False with pytest.raises(NotImplementedError): block.rescore(only_if_higher=False) @@ -1939,7 +1948,7 @@ def test_get_score_with_grading_method(self): self.assertEqual(score, expected_score) self.assertEqual(block.score, expected_score) - @patch("xmodule.capa_block.ProblemBlock.score_from_lcp") + @patch(f"{CAPA}.capa_block.ProblemBlock.score_from_lcp") def test_get_score_with_grading_method_updates_score(self, mock_score_from_lcp: Mock): """ Test that the `get_score_with_grading_method` method returns the correct score. @@ -1964,7 +1973,7 @@ def test_get_score_with_grading_method_calls_grading_method_handler(self): block = CapaFactory.create(attempts=1) current_score = Score(raw_earned=0, raw_possible=1) - with patch("xmodule.capa_block.GradingMethodHandler") as mock_handler: + with patch(f"{CAPA}.capa_block.GradingMethodHandler") as mock_handler: mock_handler.return_value.get_score.return_value = current_score block.get_score_with_grading_method(current_score) mock_handler.assert_called_once_with( @@ -2001,7 +2010,7 @@ def test_codejail_error_upon_problem_creation(self): # When codejail safe_exec fails upon problem creation, a LoncapaProblemError should be raised. 
with pytest.raises(LoncapaProblemError): - with patch("xmodule.capa.capa_problem.safe_exec") as mock_safe_exec: + with patch(f"{CAPA}.capa.capa_problem.safe_exec") as mock_safe_exec: mock_safe_exec.side_effect = SafeExecException() factory.create() @@ -2016,7 +2025,7 @@ def _rescore_problem_error_helper(self, exception_class): block.submit_problem(get_request_dict) # Simulate answering a problem that raises the exception - with patch("xmodule.capa.capa_problem.LoncapaProblem.get_grade_from_current_answers") as mock_rescore: + with patch(f"{CAPA}.capa.capa_problem.LoncapaProblem.get_grade_from_current_answers") as mock_rescore: mock_rescore.side_effect = exception_class("test error \u03a9") with pytest.raises(exception_class): block.rescore(only_if_higher=False) @@ -2058,7 +2067,7 @@ def test_save_problem_closed(self): block = CapaFactory.create(done=False) # Simulate that the problem is closed - with patch("xmodule.capa_block.ProblemBlock.closed") as mock_closed: + with patch(f"{CAPA}.capa_block.ProblemBlock.closed") as mock_closed: mock_closed.return_value = True # Try to save the problem @@ -2271,7 +2280,7 @@ def test_no_max_attempts(self): assert html is not None # assert that we got here without exploding - @patch("xmodule.capa_block.render_to_string") + @patch(f"{CAPA}.capa_block.render_to_string") def test_get_problem_html(self, render_template): """Verify problem HTML rendering uses correct template context and encapsulation.""" render_template.return_value = "
Test Template HTML
" @@ -2288,7 +2297,7 @@ def test_get_problem_html(self, render_template): block.should_show_save_button = Mock(return_value=show_save_button) # Patch the capa problem's HTML rendering - with patch("xmodule.capa.capa_problem.LoncapaProblem.get_html") as mock_html: + with patch(f"{CAPA}.capa.capa_problem.LoncapaProblem.get_html") as mock_html: mock_html.return_value = "
Test Problem HTML
" # Render the problem HTML @@ -2333,7 +2342,7 @@ def test_get_problem_html(self, render_template): """ - @patch("xmodule.capa_block.render_to_string") + @patch(f"{CAPA}.capa_block.render_to_string") def test_demand_hint(self, render_template): """Verify image-based demand hints render correctly without static URL issues.""" # HTML generation is mocked out to be meaningless here, so instead we check @@ -2358,7 +2367,7 @@ def test_demand_hint(self, render_template): assert result["hint_index"] == 0 assert result["should_enable_next_hint"] - @patch("xmodule.capa_block.render_to_string") + @patch(f"{CAPA}.capa_block.render_to_string") def test_single_demand_hint(self, render_template): """ Test the hint button enabled state when there is just a single hint. @@ -2389,7 +2398,7 @@ def test_single_demand_hint(self, render_template): assert result["hint_index"] == 0 assert not result["should_enable_next_hint"] - @patch("xmodule.capa_block.render_to_string") + @patch(f"{CAPA}.capa_block.render_to_string") def test_image_hint(self, render_template): """ Test the hint button shows an image without the static url. @@ -2450,7 +2459,7 @@ def test_input_state_consistency(self): intersection = set(block2.input_state.keys()).intersection(set(block1.input_state.keys())) assert len(intersection) == 0 - @patch("xmodule.capa_block.render_to_string") + @patch(f"{CAPA}.capa_block.render_to_string") def test_get_problem_html_error(self, render_template): """ In production, when an error occurs with the problem HTML @@ -2480,7 +2489,7 @@ def test_get_problem_html_error(self, render_template): # Expect that the block has created a new dummy problem with the error assert original_problem != block.lcp - @patch("xmodule.capa_block.render_to_string") + @patch(f"{CAPA}.capa_block.render_to_string") def test_get_problem_html_error_preview(self, render_template): """ Test the html response when an error occurs with DEBUG off in Studio. 
@@ -2506,7 +2515,7 @@ def test_get_problem_html_error_preview(self, render_template): assert error_msg in context["problem"]["html"] @override_settings(DEBUG=True) - @patch("xmodule.capa_block.render_to_string") + @patch(f"{CAPA}.capa_block.render_to_string") def test_get_problem_html_error_w_debug(self, render_template): """ Test the html response when an error occurs with DEBUG on @@ -2671,8 +2680,8 @@ def test_random_seed_bins(self, rerandomize): assert 0 <= block.seed < 1000 i -= 1 - @patch("xmodule.capa_block.log") - @patch("xmodule.capa_block.Progress") + @patch(f"{CAPA}.capa_block.log") + @patch(f"{CAPA}.capa_block.Progress") def test_get_progress_error(self, mock_progress, mock_log): """ Check that an exception given in `Progress` produces a `log.exception` call. @@ -2685,7 +2694,7 @@ def test_get_progress_error(self, mock_progress, mock_log): mock_log.exception.assert_called_once_with("Got bad progress") mock_log.reset_mock() - @patch("xmodule.capa_block.Progress") + @patch(f"{CAPA}.capa_block.Progress") def test_get_progress_no_error_if_weight_zero(self, mock_progress): """ Check that if the weight is 0 get_progress does not try to create a Progress object. @@ -2697,7 +2706,7 @@ def test_get_progress_no_error_if_weight_zero(self, mock_progress): assert progress is None assert not mock_progress.called - @patch("xmodule.capa_block.Progress") + @patch(f"{CAPA}.capa_block.Progress") def test_get_progress_calculate_progress_fraction(self, mock_progress): """ Check that score and total are calculated correctly for the progress fraction. @@ -2830,7 +2839,7 @@ def test_problem_display_name_with_default(self, display_name, expected_display_ "", " ", ) - @patch("xmodule.capa_block.render_to_string") + @patch(f"{CAPA}.capa_block.render_to_string") def test_problem_no_display_name(self, display_name, render_template): """ Verify that if problem display name is not provided then a default name is used. 
@@ -2843,10 +2852,22 @@ def test_problem_no_display_name(self, display_name, render_template): assert context["problem"]["name"] == block.location.block_type +@override_settings(USE_EXTRACTED_PROBLEM_BLOCK=True) +class ExtractedProblemBlockTestCase(_ProblemBlockTestBase): + __test__ = True + + +@override_settings(USE_EXTRACTED_PROBLEM_BLOCK=False) +class BuiltInProblemBlockTestCase(_ProblemBlockTestBase): + __test__ = True + + @ddt.ddt -class ProblemBlockXMLTest(unittest.TestCase): +class _ProblemBlockXMLTestBase(TestCase): """Tests XML strings for various problem types in XBlocks.""" + __test__ = False + sample_checkbox_problem_xml = textwrap.dedent( """ @@ -3693,7 +3714,17 @@ def test_invalid_dropdown_xml(self): CapaFactory.create(xml=problem_xml) -class ComplexEncoderTest(unittest.TestCase): +@override_settings(USE_EXTRACTED_PROBLEM_BLOCK=True) +class ExtractedProblemBlockXMLTestCase(_ProblemBlockXMLTestBase): + __test__ = True + + +@override_settings(USE_EXTRACTED_PROBLEM_BLOCK=False) +class BuiltInProblemBlockXMLTestCase(_ProblemBlockXMLTestBase): + __test__ = True + + +class ComplexEncoderTest(TestCase): """Tests JSON encoding of complex numbers.""" def test_default(self): @@ -3709,11 +3740,13 @@ def test_default(self): @skip_unless_lms @UseUnsafeCodejail() -class ProblemCheckTrackingTest(unittest.TestCase): +class _ProblemCheckTrackingTestBase(TestCase): """ Ensure correct tracking information is included in events emitted during problem checks. 
""" + __test__ = False + def setUp(self): super().setUp() self.maxDiff = None # pylint: disable=invalid-name @@ -4045,17 +4078,29 @@ def test_get_answer_with_jump_to_id_urls(self): assert problem.runtime.service(problem, "replace_urls").replace_urls.called -class ProblemBlockReportGenerationTest(unittest.TestCase): +@override_settings(USE_EXTRACTED_PROBLEM_BLOCK=True) +class ExtractedProblemCheckTrackingTestCase(_ProblemCheckTrackingTestBase): + __test__ = True + + +@override_settings(USE_EXTRACTED_PROBLEM_BLOCK=False) +class BuiltInProblemCheckTrackingTestCase(_ProblemCheckTrackingTestBase): + __test__ = True + + +class _ProblemBlockReportGenerationTestBase(TestCase): """ Ensure that Capa report generation works correctly """ + __test__ = False + def setUp(self): self.find_question_label_patcher = patch( - "xmodule.capa.capa_problem.LoncapaProblem.find_question_label", lambda self, answer_id: answer_id + f"{CAPA}.capa.capa_problem.LoncapaProblem.find_question_label", lambda self, answer_id: answer_id ) self.find_answer_text_patcher = patch( - "xmodule.capa.capa_problem.LoncapaProblem.find_answer_text", + f"{CAPA}.capa.capa_problem.LoncapaProblem.find_answer_text", lambda self, answer_id, current_answer: current_answer, ) self.find_question_label_patcher.start() @@ -4129,7 +4174,7 @@ def test_generate_report_data_report_loncapa_error(self): """Verify LonCapa errors are captured and reported instead of aborting.""" # Test to make sure reports continue despite loncappa errors, and write them into the report. 
block = self._get_block() - with patch("xmodule.capa_block.LoncapaProblem") as mock_loncapa_problem: + with patch(f"{CAPA}.capa_block.LoncapaProblem") as mock_loncapa_problem: mock_loncapa_problem.side_effect = LoncapaProblemError report_data = list( block.generate_report_data( @@ -4140,3 +4185,13 @@ def test_generate_report_data_report_loncapa_error(self): ) ) assert "Python Error: No Answer Retrieved" in list(report_data[0][1].values()) + + +@override_settings(USE_EXTRACTED_PROBLEM_BLOCK=True) +class ExtractedProblemBlockReportGenerationTestCase(_ProblemBlockReportGenerationTestBase): + __test__ = True + + +@override_settings(USE_EXTRACTED_PROBLEM_BLOCK=False) +class BuiltInProblemBlockReportGenerationTestCase(_ProblemBlockReportGenerationTestBase): + __test__ = True