class TestCaseWrapper(TestCase, HookHolder):
    """A wrapper for the unittest.TestCase class.

    This serves as a proxy for the testing process to get useful information
    about the test and functions for testing.
    """

    def __init__(self, test_param: TestParam, problem: Problem) -> None:
        """Create a test case wrapper.

        :param test_param: The test parameter to be used in testing.
        :param problem: The problem definition to be used in testing.
        """
        # Both bases are initialized explicitly (no super() chain) because the
        # class inherits from two unrelated base classes.
        TestCase.__init__(self)
        HookHolder.__init__(self)
        self._test_param = test_param
        self._problem = problem
        # Filled in later via load_context() / load_metadata().
        self._context: ContextManager | None = None
        self._metadata: GradescopeSubmissionMetadata | None = None
        # One child logger per test parameter keeps log lines attributable.
        self._logger = _test_wrapper_logger.getChild(self.test_param.format())

    @property
    def test_param(self) -> TestParam:
        """The test parameter to be used in testing."""
        return self._test_param

    @property
    def problem(self) -> Problem:
        """The problem definition to be used in testing."""
        return self._problem

    @property
    def context(self) -> ContextManager | None:
        """The context of the submission."""
        return self._context

    @property
    def metadata(self) -> GradescopeSubmissionMetadata | None:
        """The metadata of the submission."""
        return self._metadata

    @stdout_cm_adder
    def _eval_regular[Input](self, to_be_eval: Input, param: TestParam) -> Any:
        """Call the target directly with the stored args and kwargs.

        Args/kwargs are deep-copied so one evaluation cannot mutate the
        parameters seen by a later evaluation of the same test param.
        """
        return to_be_eval(*deepcopy(param.args), **deepcopy(param.kwargs))

    @stdout_cm_adder
    def _eval_mock_input[Input](self, to_be_eval: Input, param: TestParam) -> Any:
        """Evaluate the function with mock input."""
        # param.args supplies the scripted responses fed to builtins.input;
        # the target is called with no positional arguments in this mode.
        with patch("builtins.input", generate_custom_input(deepcopy(param.args))):
            result = to_be_eval()
        return result

    @stdout_cm_adder
    def _eval_pipeline[Input](self, to_be_eval: Input, param: TestParam) -> Any:
        """Evaluate the pipeline."""
        result = []
        for i, pipeline_entry in enumerate(param.args):
            if not isinstance(pipeline_entry, PipelineBase):
                raise InternalError(f"The {i}th pipeline entry is not a PipelineBase.")
            if pipeline_entry.replace:
                # A "replace" entry swaps in a new evaluation target for the
                # remaining entries; None marks its slot in the result list.
                to_be_eval = pipeline_entry(to_be_eval)
                result.append(None)
            else:
                result.append(pipeline_entry(to_be_eval))
        return result

    def _select_eval_fn(self) -> EvalFn:
        """Select the eval function based on the test parameter."""
        # mock_input takes precedence over pipeline mode.
        if self.problem.config.mock_input:
            return self._eval_mock_input
        elif self.test_param.param_info.gap_is_pipeline:
            return self._eval_pipeline
        else:
            return self._eval_regular

    def run_test(self, submission: Any, result: TestResult) -> TestResult:
        """Run the test on the submission.

        :param submission: The submission to be tested.
        :param result: The result object to be used and written to.
        :return: The result object passed to this method.
        """
        self._setup_test_result(result)
        try:
            self._run_test(submission, result)
        except AssertionError as e:
            # Equality checks in check_results use self.assertEqual, which
            # raises AssertionError on mismatch — reported as a test failure.
            result.add_error(TestFailedError(e), set_failed=result.is_pass_status_unset)
        except SyntaxError as e:
            result.add_error(SubmissionSyntaxError(e), set_failed=result.is_pass_status_unset)
        except InternalError as e:
            # Handler order matters: this must stay ahead of the generic
            # Exception handler so framework errors are not misattributed.
            result.add_error(InternalError(e), set_failed=result.is_pass_status_unset)
        except Exception as e:
            # Anything else is attributed to the student's code.
            result.add_error(StudentError(e), set_failed=result.is_pass_status_unset)
        else:
            if result.is_pass_status_unset:
                result.set_pass_status("passed")
        return result

    def check_test(self) -> Tuple[bool, Any, str] | None:
        """Check if the test passes against the gap_expect and gap_expect_stdout.

        :return: True if the test passes, False if the test fails, None if
            the test is skipped.
        """
        self._logger.debug("Checking test")
        # Nothing to check against: the test is skipped.
        if (
            self.test_param.param_info.gap_expect is None
            and self.test_param.param_info.gap_expect_stdout is None
        ):
            self._logger.debug("Test skipped")
            return None
        if self.test_param.param_info.gap_override_test is not None:
            # NOTE(review): this raises (not returns) a Warning instance when
            # an override test exists — callers must be prepared to catch it.
            raise Warning("gap_override_test is not None, check_test is ignored.")
        else:
            if self.test_param.param_info.gap_override_check:
                check_fn: CustomEqualityCheckFn = (
                    self.test_param.param_info.gap_override_check
                )
            else:
                check_fn = self.assertEqual  # type: ignore
            self._logger.debug(f"Checking test equality with fn {check_fn.__name__}")
            eval_fn: EvalFn = self._select_eval_fn()
            self._logger.debug(f"Selected evaluation fn {eval_fn.__name__}")
            # eval_fn yields a (result, captured stdout) pair — presumably via
            # the stdout_cm_adder wrapper on the _eval_* methods.
            actual_result, actual_out = eval_fn(self.problem.solution, self.test_param)
            flag = True
            if self.test_param.param_info.gap_expect is not None:
                try:
                    check_fn(actual_result, self.test_param.param_info.gap_expect)
                except AssertionError:
                    self._logger.debug("Check failed because it does not meet gap_expect")
                    flag = False
            if self.test_param.param_info.gap_expect_stdout is not None:
                try:
                    assert actual_out == self.test_param.param_info.gap_expect_stdout
                except AssertionError:
                    self._logger.debug("Check failed because it does not meet gap_expect_stdout")
                    flag = False
            return flag, actual_result, actual_out

    def _setup_test_result(self, result: TestResult) -> None:
        """Set the test result object to default values specified in the info."""
        result.set_name(self.test_param.param_info.gap_name)
        result.set_extra_points(self.test_param.param_info.gap_extra_points)
        # Use the default weight only when neither max_score nor weight is
        # specified explicitly.
        if (
            self.test_param.param_info.gap_max_score is None
            and self.test_param.param_info.gap_weight is None
        ):
            result.set_default_weight()
        else:
            result.set_max_score(self.test_param.param_info.gap_max_score)
            result.set_weight(self.test_param.param_info.gap_weight)
        result.set_hidden(self.test_param.param_info.gap_hidden)
        if self.test_param.param_info.gap_description is not None:
            # A bare string is wrapped in a list before splatting so it is not
            # expanded character by character.
            result.add_description(
                *(
                    [self.test_param.param_info.gap_description]
                    if isinstance(self.test_param.param_info.gap_description, str)
                    else self.test_param.param_info.gap_description
                )
            )
        self._logger.debug(f"Test result initialized: {result}")

    def generate_hooks(self, hook_type: HookTypes) -> None:
        """Build and cache the hook wrappers for the given hook type.

        :param hook_type: Which hook family (pre or post) to generate.
        :raises ValueError: If the hook type is not handled by this class.
        """
        match hook_type:
            case HookTypes.PRE_HOOK:
                hook_fns = self.test_param.param_info.gap_pre_hooks
                hook_wrapper = PreHook
            case HookTypes.POST_HOOK:
                hook_fns = self.test_param.param_info.gap_post_hooks
                hook_wrapper = PostHook
            case _:
                raise ValueError(f"Test Case cannot handle hook {hook_type}")
        if hook_fns is None:
            self._hooks[hook_type] = []
        else:
            # A single hook fn is accepted as well as a sequence of them.
            if not isinstance(hook_fns, Sequence):
                hook_fns: Sequence = [hook_fns]
            self._hooks[hook_type] = [
                hook_wrapper(self.apply_context(hook_fn)) for hook_fn in hook_fns
            ]
        self._logger.debug(f"Generated {hook_type} hooks")

    def run_hooks(self, hook_type: HookTypes, data) -> None:
        """Run every cached (or newly generated) hook of the given type.

        :param data: The payload handed to each hook's run() — a
            PreHookData/PostHookData bundle at the call sites in _run_test.
        """
        self._logger.debug(f"Start running {hook_type} hooks")
        for hook in self.get_or_gen_hooks(hook_type):
            hook.run(data)
        self._logger.debug(f"Finished running {hook_type} hooks")

    def _run_test(self, submission: Any, result: TestResult) -> TestResult:
        """Run the test on the submission.

        :param submission: The submission to be tested.
        :param result: The result object to be used and written to.
        """
        self._logger.debug(f"Running test on submission {submission}")
        if self.test_param.param_info.gap_override_test is not None:
            # A custom test fully replaces the evaluate-and-compare flow.
            self._logger.debug("Handing testing to gap_override_test")
            override_test: CustomTestFn = self.apply_context(
                self.test_param.param_info.gap_override_test
            )
            override_test(CustomTestData(self, result, self.problem.solution, submission))
        else:
            eval_fn: EvalFn = self._select_eval_fn()
            self._logger.debug(f"Selected evaluation fn {eval_fn.__name__}")
            self.run_hooks(
                HookTypes.PRE_HOOK,
                PreHookData(self, result, self.problem.solution, submission),
            )
            self._logger.debug(f"Running test evaluation")
            # Evaluate the reference solution and the submission with the same
            # eval fn, then compare the two bundles.
            expected = eval_fn(self.problem.solution, self.test_param)
            actual = eval_fn(submission, self.test_param)
            self.check_results(expected, actual)
            self.run_hooks(
                HookTypes.POST_HOOK,
                PostHookData(
                    self,
                    result,
                    self.problem.solution,
                    submission,
                    expected,
                    actual,
                ),
            )
        # NOTE(review): teardown is not in a finally block, so it is skipped
        # when the evaluation or a check raises — confirm this is intended.
        self.tear_down_hooks(HookTypes.PRE_HOOK)
        self.tear_down_hooks(HookTypes.POST_HOOK)
        self._logger.debug("Test completed")
        return result

    def check_results(self, expected: ResultBundle, actual: ResultBundle) -> None:
        """Compare the solution's and the submission's result bundles.

        Uses gap_override_check when provided, otherwise assertEqual; stdout
        is compared only when the problem config enables check_stdout.
        """
        if self.test_param.param_info.gap_override_check:
            check_fn: CustomEqualityCheckFn = (
                self.test_param.param_info.gap_override_check
            )
        else:
            check_fn = self.assertEqual  # type: ignore
        self._logger.debug(f"Checking test equality with fn {check_fn.__name__}")
        check_fn(expected.output, actual.output)
        if self.problem.config.check_stdout:
            check_fn(expected.stdout, actual.stdout)
        self._logger.debug("Test checked")

    def apply_context[T: FunctionType](self, fn: T) -> T:
        """Bind the loaded submission context onto fn in easy-context mode.

        Returns fn unchanged when easy context is disabled both globally and
        for this test parameter.
        """
        if (
            self.problem.config.easy_context
            or self.test_param.param_info.gap_easy_context
        ):
            self._logger.debug("Using easy context")
            return apply_context_on_fn(fn, self.context)
        else:
            return fn

    def load_context(self, context: ContextManager) -> Self:
        """Load the submission context into the test case.

        :param context: The context to load.
        """
        # Deep copy so later mutations of the caller's context do not leak in.
        self._context = deepcopy(context)
        self._logger.debug(f"Context loaded: {self._context}")
        return self

    def load_metadata(self, metadata: GradescopeSubmissionMetadata | None) -> Self:
        """Load the submission metadata into the test case.

        :param metadata: The metadata to load. The metadata could be None.
        """
        self._metadata = metadata
        self._logger.debug(f"Metadata loaded: {self._metadata}")
        return self
def __init__(self, test_param: TestParam, problem: Problem) -> None:
    """Initialize the wrapper for one test-parameter/problem pair.

    :param test_param: The test parameter to be used in testing.
    :param problem: The problem definition to be used in testing.
    """
    # Initialize both unrelated bases explicitly rather than via super().
    TestCase.__init__(self)
    HookHolder.__init__(self)
    self._problem = problem
    self._test_param = test_param
    # Context and metadata are attached later via their load_* methods.
    self._context: ContextManager | None = None
    self._metadata: GradescopeSubmissionMetadata | None = None
    # A per-parameter child logger keeps log output attributable.
    self._logger = _test_wrapper_logger.getChild(self.test_param.format())
def check_test(self) -> Tuple[bool, Any, str] | None:
    """Check if the test passes against the gap_expect and gap_expect_stdout.

    :return: True if the test passes, False if the test fails, None if the
        test is skipped.
    """
    self._logger.debug("Checking test")
    # Nothing to check against: skip the test entirely.
    if (
        self.test_param.param_info.gap_expect is None
        and self.test_param.param_info.gap_expect_stdout is None
    ):
        self._logger.debug("Test skipped")
        return None
    if self.test_param.param_info.gap_override_test is not None:
        # NOTE(review): a Warning is raised (not returned) in this branch —
        # callers must catch it.
        raise Warning("gap_override_test is not None, check_test is ignored.")
    else:
        if self.test_param.param_info.gap_override_check:
            check_fn: CustomEqualityCheckFn = (
                self.test_param.param_info.gap_override_check
            )
        else:
            check_fn = self.assertEqual  # type: ignore
        self._logger.debug(f"Checking test equality with fn {check_fn.__name__}")
        eval_fn: EvalFn = self._select_eval_fn()
        self._logger.debug(f"Selected evaluation fn {eval_fn.__name__}")
        # eval_fn yields a (result, captured stdout) pair.
        actual_result, actual_out = eval_fn(self.problem.solution, self.test_param)
        flag = True
        # Compare the returned value when an expectation is given.
        if self.test_param.param_info.gap_expect is not None:
            try:
                check_fn(actual_result, self.test_param.param_info.gap_expect)
            except AssertionError:
                self._logger.debug("Check failed because it does not meet gap_expect")
                flag = False
        # Compare captured stdout when an expectation is given.
        if self.test_param.param_info.gap_expect_stdout is not None:
            try:
                assert actual_out == self.test_param.param_info.gap_expect_stdout
            except AssertionError:
                self._logger.debug("Check failed because it does not meet gap_expect_stdout")
                flag = False
        return flag, actual_result, actual_out
def load_context(self, context: ContextManager) -> Self:
    """Load the submission context into the test case.

    A deep copy is stored, so later mutations of the caller's context do not
    leak into this test case.

    :param context: The context to load.
    :return: This test case, enabling fluent chaining.
    """
    copied = deepcopy(context)
    self._context = copied
    self._logger.debug(f"Context loaded: {self._context}")
    return self
def load_metadata(self, metadata: GradescopeSubmissionMetadata | None) -> Self:
    """Load the submission metadata into the test case.

    The metadata is stored as-is (no copy is taken).

    :param metadata: The metadata to load. The metadata could be None.
    :return: This test case, enabling fluent chaining.
    """
    self._metadata = metadata
    self._logger.debug(f"Metadata loaded: {self._metadata}")
    return self
def run_test(self, submission: Any, result: TestResult) -> TestResult:
    """Run the test on the submission.

    Maps raised exceptions onto the matching error category on the result;
    handler order matters (InternalError before the generic Exception).

    :param submission: The submission to be tested.
    :param result: The result object to be used and written to.
    :return: The result object passed to this method.
    """
    self._setup_test_result(result)
    try:
        self._run_test(submission, result)
    except AssertionError as exc:
        # An equality check failed: the submission produced wrong output.
        result.add_error(TestFailedError(exc), set_failed=result.is_pass_status_unset)
    except SyntaxError as exc:
        result.add_error(SubmissionSyntaxError(exc), set_failed=result.is_pass_status_unset)
    except InternalError as exc:
        result.add_error(InternalError(exc), set_failed=result.is_pass_status_unset)
    except Exception as exc:
        # Everything else is attributed to the student's code.
        result.add_error(StudentError(exc), set_failed=result.is_pass_status_unset)
    else:
        # No exception: mark as passed unless a hook already set the status.
        if result.is_pass_status_unset:
            result.set_pass_status("passed")
    return result