diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9858e272..87c04fb4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,4 +32,4 @@ jobs: - uses: DeterminateSystems/nix-installer-action@main - uses: DeterminateSystems/magic-nix-cache-action@main - run: echo "${GITHUB_WORKSPACE}" >> $GITHUB_PATH - - run: nix develop .#types -c pyright ./tested + - run: nix develop .#types -c pyright ./tested ./tests diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index dd925df4..3ca6d740 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -3,7 +3,7 @@ name: Integration tests on: [ pull_request ] env: - EXERCISES_COMMIT: c3785b61574885564499ddf14943dfc7c6fe1aa6 + EXERCISES_COMMIT: 31ef0f174efaeba2a37415115e7fd0332573d9b2 jobs: # Runs the test suite in a slightly modified Docker image used by Dodona. diff --git a/pyproject.toml b/pyproject.toml index a1fca831..dd101e9d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,13 +10,11 @@ line-length = 88 extend-exclude="tests/exercises/*" [tool.pytest.ini_options] -markers = [ - "haskell", # Run the haskell tests - "linter", # Run linter tests - "slow", # Slow tests -] addopts = "--ignore=tests/test_integration_javascript.py" +[tool.pyright] +exclude = ["tests/exercises/", "tests/snapshots/", "tests/descriptions/"] + [tool.poetry] name = "tested" version = "1.0.0" diff --git a/tested/dsl/schema_draft7.json b/tested/dsl/schema-strict.json similarity index 51% rename from tested/dsl/schema_draft7.json rename to tested/dsl/schema-strict.json index 5e6b6a70..6808880a 100644 --- a/tested/dsl/schema_draft7.json +++ b/tested/dsl/schema-strict.json @@ -1,42 +1,38 @@ { - "$id" : "https://github.com/dodona-edu/universal-judge/blob/master/tested/dsl/schema.yaml", + "$id" : "tested:dsl:schema7", "$schema" : "http://json-schema.org/draft-07/schema#", - "title" : "DSL Schema", - "description" : "DSL test suite for TESTed", + "title" : "TESTed-DSL", "oneOf" : [ { "$ref" : "#/definitions/_rootObject" }, + { + "$ref" : "#/definitions/_tabList" + }, { "$ref" : "#/definitions/_unitList" } ], "definitions" : { - "_unitList" : { - "type" : "array", - "minItems" : 1, - "items" : { - "$ref" : "#/definitions/unit" - } - }, - "_testcaseList" : { - "type" : "array", - "minItems" : 1, - "items" : { - "$ref" : "#/definitions/testcase" - } - }, - "_scriptList" : { - "type" : "array", - "minItems" : 1, - "items" : { - "$ref" : "#/definitions/test" - } - }, - "_rootObject" : { - "type" : "object", + "_rootObject": { + "type": "object", + "oneOf" : [ + { + "required" : ["tabs"], + "not": { + "required" : ["units"] + } + }, + { + "required" : ["units"], + "not": { + "required" : ["tabs"] + } + } + ], "properties" : { "files" : { + "description" : "A list of files used in the test suite.", "type" : "array", "items" : { "$ref" : "#/definitions/file" @@ -47,43 +43,97 @@ "description" : "Namespace of the submitted solution, in `snake_case`" }, "tabs" : { - "$ref" : "#/definitions/_unitList" + "$ref" : "#/definitions/_tabList" }, - "units" : { + "units": { "$ref" : "#/definitions/_unitList" }, "language" : { "description" : "Indicate that all code is in a specific language.", "oneOf" : [ { - "$ref" : "#/definitions/supportedLanguage" + "$ref" : "#/definitions/programmingLanguage" }, { "const" : "tested" } - ], - "default" : "tested" + ] }, "definitions": { "description": "Define hashes to use elsewhere.", "type": "object" } + } + }, + "_tabList" : { + "type" : "array", + "minItems" 
: 1, + "items" : { + "$ref" : "#/definitions/tab" + } + }, + "_unitList" : { + "type" : "array", + "minItems" : 1, + "items" : { + "$ref" : "#/definitions/unit" + } + }, + "tab" : { + "type" : "object", + "description" : "A tab in the test suite.", + "required" : [ + "tab" + ], + "properties" : { + "files" : { + "type" : "array", + "items" : { + "$ref" : "#/definitions/file" + } + }, + "hidden" : { + "type" : "boolean", + "description" : "Defines if the unit/tab is hidden for the student or not" + }, + "tab" : { + "type" : "string", + "description" : "The name of this tab." + }, + "definitions": { + "description": "Define objects to use elsewhere.", + "type": "object" + } }, "oneOf" : [ { "required" : [ - "tabs" - ] + "contexts" + ], + "properties" : { + "contexts" : { + "$ref" : "#/definitions/_contextList" + } + } }, { "required" : [ - "units" - ] + "testcases" + ], + "properties" : { + "testcases" : { + "$ref" : "#/definitions/_testcaseList" + } + } } ] }, "unit" : { "type" : "object", + "description" : "A unit in the test suite.", + "required" : [ + "unit" + ], "properties" : { "files" : { "type" : "array", @@ -96,69 +146,71 @@ "description" : "Defines if the unit/tab is hidden for the student or not" }, "unit" : { - "type" : "string", - "description" : "The name of this unit." - }, - "tab" : { "type" : "string", "description" : "The name of this tab." }, - "cases" : { - "$ref" : "#/definitions/_testcaseList" - }, - "contexts" : { - "$ref" : "#/definitions/_testcaseList" - }, - "scripts" : { - "$ref" : "#/definitions/_scriptList" - }, - "testcases" : { - "$ref" : "#/definitions/_scriptList" - }, "definitions": { - "description": "Define hashes to use elsewhere.", + "description": "Define objects to use elsewhere.", "type": "object" } }, "oneOf" : [ { "required" : [ - "tab" + "cases" ], - "oneOf" : [ - { - "required" : [ - "contexts" - ] - }, - { - "required" : [ - "testcases" - ] + "properties" : { + "cases" : { + "$ref" : "#/definitions/_caseList" } - ] + } }, { "required" : [ - "unit" + "scripts" ], - "oneOf" : [ - { - "required" : [ - "cases" - ] - }, - { - "required" : [ - "scripts" - ] + "properties" : { + "scripts" : { + "$ref" : "#/definitions/_scriptList" } - ] + } } ] }, - "testcase" : { + "_contextList": { + "type" : "array", + "minItems" : 1, + "items" : { + "$ref" : "#/definitions/context" + } + }, + "_caseList": { + "type" : "array", + "minItems" : 1, + "items" : { + "$ref" : "#/definitions/case" + } + }, + "_testcaseList": { + "type" : "array", + "minItems" : 1, + "items" : { + "$ref" : "#/definitions/testcase" + } + }, + "_scriptList": { + "type" : "array", + "minItems" : 1, + "items" : { + "$ref" : "#/definitions/script" + } + }, + "context" : { "type" : "object", + "description" : "A set of testcase in the same context.", + "required" : [ + "testcases" + ], "properties" : { "files" : { "type" : "array", @@ -171,28 +223,36 @@ "description" : "Description of this context." }, "testcases" : { - "$ref" : "#/definitions/_scriptList" + "$ref" : "#/definitions/_testcaseList" + } + } + }, + "case" : { + "type" : "object", + "description" : "A test case.", + "required" : [ + "script" + ], + "properties" : { + "files" : { + "type" : "array", + "items" : { + "$ref" : "#/definitions/file" + } + }, + "context" : { + "type" : "string", + "description" : "Description of this context." 
}, "script" : { "$ref" : "#/definitions/_scriptList" } - }, - "oneOf" : [ - { - "required" : [ - "testcases" - ] - }, - { - "required" : [ - "script" - ] - } - ] + } }, - "test" : { + "testcase" : { "type" : "object", "description" : "An individual test for a statement or expression", + "additionalProperties" : false, "properties" : { "description": { "$ref": "#/definitions/message" @@ -230,7 +290,8 @@ "description" : "Expected exception message", "oneOf" : [ { - "type" : "string" + "type" : "string", + "description" : "Message of the expected exception." }, { "type" : "object", @@ -239,12 +300,15 @@ ], "properties" : { "message" : { - "type" : "string" + "type" : "string", + "description" : "Message of the expected exception." }, "types" : { + "minProperties" : 1, + "description" : "Language mapping of expected exception types.", "type" : "object", "propertyNames" : { - "$ref" : "#/definitions/supportedLanguage" + "$ref" : "#/definitions/programmingLanguage" }, "items" : { "type" : "string" @@ -262,7 +326,7 @@ }, "return" : { "description" : "Expected return value", - "$ref" : "#/definitions/advancedValueOutputChannel" + "$ref" : "#/definitions/returnOutputChannel" }, "stderr" : { "description" : "Expected output at stderr", @@ -272,16 +336,21 @@ "description" : "Expected output at stdout", "$ref" : "#/definitions/textOutputChannel" }, - "exitCode" : { + "exit_code" : { "type" : "integer", "description" : "Expected exit code for the run" } } }, - "textOutputChannel" : { - "anyOf" : [ - { - "description" : "A simple value which is converted into a string.", + "script" : { + "type" : "object", + "description" : "An individual test (script) for a statement or expression", + "properties" : { + "description": { + "$ref": "#/definitions/message" + }, + "stdin" : { + "description" : "Stdin for this context", "type" : [ "string", "number", @@ -289,53 +358,149 @@ "boolean" ] }, - { - "$ref" : "#/definitions/advancedTextOutputChannel" + "arguments" : { + "type" : "array", + "description" : "Array of program call arguments", + "items" : { + "type" : [ + "string", + "number", + "integer", + "boolean" + ] + } + }, + "statement" : { + "description" : "The statement to evaluate.", + "$ref" : "#/definitions/expressionOrStatement" + }, + "expression" : { + "description" : "The expression to evaluate.", + "$ref" : "#/definitions/expressionOrStatement" + }, + "exception" : { + "description" : "Expected exception message", + "oneOf" : [ + { + "type" : "string", + "description" : "Message of the expected exception." + }, + { + "type" : "object", + "required" : [ + "types" + ], + "properties" : { + "message" : { + "type" : "string", + "description" : "Message of the expected exception." 
+ }, + "types" : { + "minProperties" : 1, + "description" : "Language mapping of expected exception types.", + "type" : "object", + "propertyNames" : { + "$ref" : "#/definitions/programmingLanguage" + }, + "items" : { + "type" : "string" + } + } + } + } + ] + }, + "files" : { + "type" : "array", + "items" : { + "$ref" : "#/definitions/file" + } + }, + "return" : { + "description" : "Expected return value", + "$ref" : "#/definitions/returnOutputChannel" + }, + "stderr" : { + "description" : "Expected output at stderr", + "$ref" : "#/definitions/textOutputChannel" + }, + "stdout" : { + "description" : "Expected output at stdout", + "$ref" : "#/definitions/textOutputChannel" + }, + "exit_code" : { + "type" : "integer", + "description" : "Expected exit code for the run" } - ] + } }, "expressionOrStatement" : { "oneOf" : [ { - "type" : "string" + "type" : "string", + "format" : "tested-dsl-expression", + "description" : "A statement of expression in Python-like syntax as YAML string." }, { + "description" : "Programming-language-specific statement or expression.", "type" : "object", + "minProperties" : 1, "propertyNames" : { - "$ref" : "#/definitions/supportedLanguage" + "$ref" : "#/definitions/programmingLanguage" }, "items" : { "type" : "string", - "description" : "A language-specific literal which will be used verbatim." + "description" : "A language-specific literal, which will be used verbatim." } } ] }, - "advancedTextOutputChannel" : { + "yamlValueOrPythonExpression": { + "oneOf" : [ + { + "$ref" : "#/definitions/yamlValue" + }, + { + "type" : "expression", + "format" : "tested-dsl-expression", + "description" : "An expression in Python-syntax." + } + ] + }, + "file" : { "type" : "object", - "description" : "Advanced output for a text output channel, such as stdout or stderr.", + "description" : "A file used in the test suite.", "required" : [ - "data" + "name", + "url" ], "properties" : { - "data" : { - "description" : "The expected data types.", - "type" : [ - "string", - "number", - "integer", - "boolean" - ] + "name" : { + "type" : "string", + "description" : "The filename, including the file extension." + }, + "url" : { + "type" : "string", + "format" : "uri", + "description" : "Relative path to the file in the `description` folder of an exercise." } - }, - "oneOf" : [ + } + }, + "textOutputChannel" : { + "anyOf" : [ { + "$ref" : "#/definitions/textualType" + }, + { + "type" : "object", + "description" : "Built-in oracle for text values.", + "required" : ["data"], "properties" : { - "oracle" : { - "type" : "string", - "enum" : [ - "builtin" - ] + "data": { + "$ref" : "#/definitions/textualType" + }, + "oracle": { + "const" : "builtin" }, "config" : { "$ref" : "#/definitions/textConfigurationOptions" @@ -343,16 +508,67 @@ } }, { + "type" : "object", + "description" : "Custom oracle for text values.", + "required" : ["oracle", "file", "data"], + "properties" : { + "oracle": { + "const" : "custom_check" + }, + "file" : { + "type" : "string", + "description" : "The path to the file containing the custom check function." 
+ }, + "name" : { + "type" : "string", + "description" : "The name of the custom check function.", + "default" : "evaluate" + }, + "arguments" : { + "type" : "array", + "description" : "List of YAML (or tagged expression) values to use as arguments to the function.", + "items" : { + "$ref" : "#/definitions/yamlValueOrPythonExpression" + } + } + } + } + ] + }, + "returnOutputChannel": { + "oneOf" : [ + { + "$ref" : "#/definitions/yamlValueOrPythonExpression" + }, + { + "type" : "oracle", + "additionalProperties" : false, "required" : [ + "value" + ], + "properties" : { + "oracle" : { + "const" : "builtin" + }, + "value" : { + "$ref" : "#/definitions/yamlValueOrPythonExpression" + } + } + }, + { + "type" : "oracle", + "additionalProperties" : false, + "required" : [ + "value", "oracle", "file" ], "properties" : { "oracle" : { - "type" : "string", - "enum" : [ - "custom_check" - ] + "const" : "custom_check" + }, + "value" : { + "$ref" : "#/definitions/yamlValueOrPythonExpression" }, "file" : { "type" : "string", @@ -365,70 +581,52 @@ }, "arguments" : { "type" : "array", - "description" : "List of YAML (or tagged expression) values to use as arguments to the function." + "description" : "List of YAML (or tagged expression) values to use as arguments to the function.", + "items" : { + "$ref" : "#/definitions/yamlValueOrPythonExpression" + } } } } ] }, - "advancedValueOutputChannel" : { - "anyOf" : [ - {}, + "programmingLanguage" : { + "type" : "string", + "description" : "One of the programming languages supported by TESTed.", + "enum" : [ + "bash", + "c", + "haskell", + "java", + "javascript", + "kotlin", + "python", + "runhaskell", + "csharp" + ] + }, + "message": { + "oneOf": [ { - "type" : "string", - "description" : "A 'Python' value to parse and use as the expected type." + "type": "string", + "description" : "A simple message to display." }, { - "type" : "object", - "description" : "A custom check function.", + "type": "object", "required" : [ - "value" + "description" ], "properties" : { - "value" : { + "description": { "type" : "string", - "description" : "The expected value." - } - }, - "oneOf" : [ - { - "properties" : { - "oracle" : { - "type" : "string", - "enum" : [ - "builtin" - ] - } - } + "description" : "The message to display." }, - { - "required" : [ - "oracle", - "file" - ], - "properties" : { - "oracle" : { - "type" : "string", - "enum" : [ - "custom_check" - ] - }, - "file" : { - "type" : "string", - "description" : "The path to the file containing the custom check function." - }, - "name" : { - "type" : "string", - "description" : "The name of the custom check function.", - "default" : "evaluate" - }, - "arguments" : { - "type" : "array", - "description" : "List of YAML (or tagged expression) values to use as arguments to the function." - } - } + "format": { + "type" : "string", + "default" : "text", + "description" : "The format of the message, either a programming language, 'text' or 'html'." 
} - ] + } } ] }, @@ -467,60 +665,20 @@ } } }, - "file" : { - "type" : "object", - "description" : "Path to a file for input.", - "required" : [ - "name", - "url" - ], - "properties" : { - "name" : { - "type" : "string", - "description" : "File name" - }, - "url" : { - "type" : "string", - "format" : "uri", - "description" : "Relative path to the file in the `description` folder of a Dodona exercise" - } - } - }, - "supportedLanguage" : { - "type" : "string", - "enum" : [ - "bash", - "c", - "haskell", - "java", - "javascript", - "kotlin", - "python", - "runhaskell", - "csharp" + "textualType": { + "description" : "Simple textual value, converted to string.", + "type" : [ + "string", + "number", + "integer", + "boolean" ] }, - "message": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "object", - "required" : [ - "description" - ], - "properties" : { - "description": { - "type" : "string" - }, - "format": { - "type" : "string", - "default" : "text" - } - } - } - ] + "yamlValue": { + "description" : "A value represented as YAML.", + "not": { + "type": ["oracle", "expression"] + } } } } diff --git a/tested/dsl/schema.json b/tested/dsl/schema.json index 625767f0..1a4bc381 100644 --- a/tested/dsl/schema.json +++ b/tested/dsl/schema.json @@ -1,46 +1,41 @@ { - "$id" : "https://github.com/dodona-edu/universal-judge/blob/master/tested/dsl/schema.yaml", - "$schema" : "https://json-schema.org/draft/2019-09/schema", - "title" : "DSL Schema", - "description" : "DSL test suite for TESTed", + "$id" : "tested:dsl:schema7", + "$schema" : "http://json-schema.org/draft-07/schema#", + "title" : "TESTed-DSL", "oneOf" : [ { - "$ref" : "#/$defs/_rootObject" + "$ref" : "#/definitions/_rootObject" }, { - "$ref" : "#/$defs/_unitList" + "$ref" : "#/definitions/_tabList" + }, + { + "$ref" : "#/definitions/_unitList" } ], - "$defs" : { - "_unitList" : { - "type" : "array", - "minItems" : 1, - "items" : { - "$ref" : "#/$defs/unit" - } - }, - "_testcaseList" : { - "type" : "array", - "minItems" : 1, - "items" : { - "$ref" : "#/$defs/testcase" - } - }, - "_scriptList" : { - "type" : "array", - "minItems" : 1, - "items" : { - "$ref" : "#/$defs/script" - } - }, - "_rootObject" : { - "type" : "object", - "unevaluatedProperties" : false, + "definitions" : { + "_rootObject": { + "type": "object", + "oneOf" : [ + { + "required" : ["tabs"], + "not": { + "required" : ["units"] + } + }, + { + "required" : ["units"], + "not": { + "required" : ["tabs"] + } + } + ], "properties" : { "files" : { + "description" : "A list of files used in the test suite.", "type" : "array", "items" : { - "$ref" : "#/$defs/file" + "$ref" : "#/definitions/file" } }, "namespace" : { @@ -48,49 +43,102 @@ "description" : "Namespace of the submitted solution, in `snake_case`" }, "tabs" : { - "$ref" : "#/$defs/_unitList" + "$ref" : "#/definitions/_tabList" }, - "units" : { - "$ref" : "#/$defs/_unitList" + "units": { + "$ref" : "#/definitions/_unitList" }, "language" : { "description" : "Indicate that all code is in a specific language.", "oneOf" : [ { - "$ref" : "#/$defs/supportedLanguage" + "$ref" : "#/definitions/programmingLanguage" }, { "const" : "tested" } - ], - "default" : "tested" + ] }, "definitions": { "description": "Define hashes to use elsewhere.", "type": "object" } + } + }, + "_tabList" : { + "type" : "array", + "minItems" : 1, + "items" : { + "$ref" : "#/definitions/tab" + } + }, + "_unitList" : { + "type" : "array", + "minItems" : 1, + "items" : { + "$ref" : "#/definitions/unit" + } + }, + "tab" : { + "type" : "object", + 
"description" : "A tab in the test suite.", + "required" : [ + "tab" + ], + "properties" : { + "files" : { + "type" : "array", + "items" : { + "$ref" : "#/definitions/file" + } + }, + "hidden" : { + "type" : "boolean", + "description" : "Defines if the unit/tab is hidden for the student or not" + }, + "tab" : { + "type" : "string", + "description" : "The name of this tab." + }, + "definitions": { + "description": "Define objects to use elsewhere.", + "type": "object" + } }, "oneOf" : [ { "required" : [ - "tabs" - ] + "contexts" + ], + "properties" : { + "contexts" : { + "$ref" : "#/definitions/_contextList" + } + } }, { "required" : [ - "units" - ] + "testcases" + ], + "properties" : { + "testcases" : { + "$ref" : "#/definitions/_testcaseList" + } + } } ] }, "unit" : { "type" : "object", - "unevaluatedProperties" : false, + "description" : "A unit in the test suite.", + "required" : [ + "unit" + ], "properties" : { "files" : { "type" : "array", "items" : { - "$ref" : "#/$defs/file" + "$ref" : "#/definitions/file" } }, "hidden" : { @@ -98,75 +146,76 @@ "description" : "Defines if the unit/tab is hidden for the student or not" }, "unit" : { - "type" : "string", - "description" : "The name of this unit." - }, - "tab" : { "type" : "string", "description" : "The name of this tab." }, - "cases" : { - "$ref" : "#/$defs/_testcaseList" - }, - "contexts" : { - "$ref" : "#/$defs/_testcaseList" - }, - "scripts" : { - "$ref" : "#/$defs/_scriptList" - }, - "testcases" : { - "$ref" : "#/$defs/_scriptList" - }, "definitions": { - "description": "Define hashes to use elsewhere.", + "description": "Define objects to use elsewhere.", "type": "object" } }, "oneOf" : [ { "required" : [ - "tab" + "cases" ], - "oneOf" : [ - { - "required" : [ - "contexts" - ] - }, - { - "required" : [ - "testcases" - ] + "properties" : { + "cases" : { + "$ref" : "#/definitions/_caseList" } - ] + } }, { "required" : [ - "unit" + "scripts" ], - "oneOf" : [ - { - "required" : [ - "cases" - ] - }, - { - "required" : [ - "scripts" - ] + "properties" : { + "scripts" : { + "$ref" : "#/definitions/_scriptList" } - ] + } } ] }, - "testcase" : { + "_contextList": { + "type" : "array", + "minItems" : 1, + "items" : { + "$ref" : "#/definitions/context" + } + }, + "_caseList": { + "type" : "array", + "minItems" : 1, + "items" : { + "$ref" : "#/definitions/case" + } + }, + "_testcaseList": { + "type" : "array", + "minItems" : 1, + "items" : { + "$ref" : "#/definitions/testcase" + } + }, + "_scriptList": { + "type" : "array", + "minItems" : 1, + "items" : { + "$ref" : "#/definitions/script" + } + }, + "context" : { "type" : "object", - "unevaluatedProperties" : false, + "description" : "A set of testcase in the same context.", + "required" : [ + "testcases" + ], "properties" : { "files" : { "type" : "array", "items" : { - "$ref" : "#/$defs/file" + "$ref" : "#/definitions/file" } }, "context" : { @@ -174,32 +223,39 @@ "description" : "Description of this context." }, "testcases" : { - "$ref" : "#/$defs/_scriptList" - }, - "script" : { - "$ref" : "#/$defs/_scriptList" + "$ref" : "#/definitions/_testcaseList" } - }, - "oneOf" : [ - { - "required" : [ - "testcases" - ] + } + }, + "case" : { + "type" : "object", + "description" : "A test case.", + "required" : [ + "script" + ], + "properties" : { + "files" : { + "type" : "array", + "items" : { + "$ref" : "#/definitions/file" + } }, - { - "required" : [ - "script" - ] + "context" : { + "type" : "string", + "description" : "Description of this context." 
+ }, + "script" : { + "$ref" : "#/definitions/_scriptList" } - ] + } }, - "script" : { + "testcase" : { "type" : "object", - "unevaluatedProperties" : false, "description" : "An individual test for a statement or expression", + "additionalProperties" : false, "properties" : { "description": { - "$ref": "#/$defs/message" + "$ref": "#/definitions/message" }, "stdin" : { "description" : "Stdin for this context", @@ -224,17 +280,18 @@ }, "statement" : { "description" : "The statement to evaluate.", - "$ref" : "#/$defs/expressionOrStatement" + "$ref" : "#/definitions/expressionOrStatement" }, "expression" : { "description" : "The expression to evaluate.", - "$ref" : "#/$defs/expressionOrStatement" + "$ref" : "#/definitions/expressionOrStatement" }, "exception" : { "description" : "Expected exception message", "oneOf" : [ { - "type" : "string" + "type" : "string", + "description" : "Message of the expected exception." }, { "type" : "object", @@ -243,12 +300,15 @@ ], "properties" : { "message" : { - "type" : "string" + "type" : "string", + "description" : "Message of the expected exception." }, "types" : { + "minProperties" : 1, + "description" : "Language mapping of expected exception types.", "type" : "object", "propertyNames" : { - "$ref" : "#/$defs/supportedLanguage" + "$ref" : "#/definitions/programmingLanguage" }, "items" : { "type" : "string" @@ -261,20 +321,20 @@ "files" : { "type" : "array", "items" : { - "$ref" : "#/$defs/file" + "$ref" : "#/definitions/file" } }, "return" : { - "description" : "Expected return value.", - "$ref" : "#/$defs/advancedValueOutputChannel" + "description" : "Expected return value", + "$ref" : "#/definitions/returnOutputChannel" }, "stderr" : { "description" : "Expected output at stderr", - "$ref" : "#/$defs/textOutputChannel" + "$ref" : "#/definitions/textOutputChannel" }, "stdout" : { "description" : "Expected output at stdout", - "$ref" : "#/$defs/textOutputChannel" + "$ref" : "#/definitions/textOutputChannel" }, "exit_code" : { "type" : "integer", @@ -282,10 +342,15 @@ } } }, - "textOutputChannel" : { - "anyOf" : [ - { - "description" : "A simple value which is converted into a string.", + "script" : { + "type" : "object", + "description" : "An individual test (script) for a statement or expression", + "properties" : { + "description": { + "$ref": "#/definitions/message" + }, + "stdin" : { + "description" : "Stdin for this context", "type" : [ "string", "number", @@ -293,71 +358,217 @@ "boolean" ] }, - { - "$ref" : "#/$defs/advancedTextOutputChannel" + "arguments" : { + "type" : "array", + "description" : "Array of program call arguments", + "items" : { + "type" : [ + "string", + "number", + "integer", + "boolean" + ] + } + }, + "statement" : { + "description" : "The statement to evaluate.", + "$ref" : "#/definitions/expressionOrStatement" + }, + "expression" : { + "description" : "The expression to evaluate.", + "$ref" : "#/definitions/expressionOrStatement" + }, + "exception" : { + "description" : "Expected exception message", + "oneOf" : [ + { + "type" : "string", + "description" : "Message of the expected exception." + }, + { + "type" : "object", + "required" : [ + "types" + ], + "properties" : { + "message" : { + "type" : "string", + "description" : "Message of the expected exception." 
+ }, + "types" : { + "minProperties" : 1, + "description" : "Language mapping of expected exception types.", + "type" : "object", + "propertyNames" : { + "$ref" : "#/definitions/programmingLanguage" + }, + "items" : { + "type" : "string" + } + } + } + } + ] + }, + "files" : { + "type" : "array", + "items" : { + "$ref" : "#/definitions/file" + } + }, + "return" : { + "description" : "Expected return value", + "$ref" : "#/definitions/returnOutputChannel" + }, + "stderr" : { + "description" : "Expected output at stderr", + "$ref" : "#/definitions/textOutputChannel" + }, + "stdout" : { + "description" : "Expected output at stdout", + "$ref" : "#/definitions/textOutputChannel" + }, + "exit_code" : { + "type" : "integer", + "description" : "Expected exit code for the run" } - ] + } }, "expressionOrStatement" : { "oneOf" : [ { - "type" : "string" + "type" : "string", + "format" : "tested-dsl-expression", + "description" : "A statement of expression in Python-like syntax as YAML string." }, { + "description" : "Programming-language-specific statement or expression.", "type" : "object", + "minProperties" : 1, "propertyNames" : { - "$ref" : "#/$defs/supportedLanguage" + "$ref" : "#/definitions/programmingLanguage" }, "items" : { "type" : "string", - "description" : "A language-specific literal which will be used verbatim." + "description" : "A language-specific literal, which will be used verbatim." } } ] }, - "advancedTextOutputChannel" : { + "yamlValueOrPythonExpression": { + "oneOf" : [ + { + "$ref" : "#/definitions/yamlValue" + }, + { + "type" : "string", + "format" : "tested-dsl-expression", + "description" : "An expression in Python-syntax." + } + ] + }, + "file" : { "type" : "object", - "unevaluatedProperties" : false, - "description" : "Advanced output for a text output channel, such as stdout or stderr.", + "description" : "A file used in the test suite.", "required" : [ - "data" + "name", + "url" ], "properties" : { - "data" : { - "description" : "The expected data types.", - "type" : [ - "string", - "number", - "integer", - "boolean" - ] + "name" : { + "type" : "string", + "description" : "The filename, including the file extension." + }, + "url" : { + "type" : "string", + "format" : "uri", + "description" : "Relative path to the file in the `description` folder of an exercise." } - }, + } + }, + "textOutputChannel" : { + "anyOf" : [ + { + "$ref" : "#/definitions/textualType" + }, + { + "type" : "object", + "description" : "Built-in oracle for text values.", + "required" : ["data"], + "properties" : { + "data": { + "$ref" : "#/definitions/textualType" + }, + "oracle": { + "const" : "builtin" + }, + "config" : { + "$ref" : "#/definitions/textConfigurationOptions" + } + } + }, + { + "type" : "object", + "description" : "Custom oracle for text values.", + "required" : ["oracle", "file", "data"], + "properties" : { + "oracle": { + "const" : "custom_check" + }, + "file" : { + "type" : "string", + "description" : "The path to the file containing the custom check function." 
+ }, + "name" : { + "type" : "string", + "description" : "The name of the custom check function.", + "default" : "evaluate" + }, + "arguments" : { + "type" : "array", + "description" : "List of YAML (or tagged expression) values to use as arguments to the function.", + "items" : { + "$ref" : "#/definitions/yamlValueOrPythonExpression" + } + } + } + } + ] + }, + "returnOutputChannel": { "oneOf" : [ { + "$ref" : "#/definitions/yamlValueOrPythonExpression" + }, + { + "type" : "object", + "additionalProperties" : false, + "required" : [ + "value" + ], "properties" : { "oracle" : { - "type" : "string", - "enum" : [ - "builtin" - ] + "const" : "builtin" }, - "config" : { - "$ref" : "#/$defs/textConfigurationOptions" + "value" : { + "$ref" : "#/definitions/yamlValueOrPythonExpression" } } }, { + "type" : "object", + "additionalProperties" : false, "required" : [ + "value", "oracle", "file" ], "properties" : { "oracle" : { - "type" : "string", - "enum" : [ - "custom_check" - ] + "const" : "custom_check" + }, + "value" : { + "$ref" : "#/definitions/yamlValueOrPythonExpression" }, "file" : { "type" : "string", @@ -370,71 +581,52 @@ }, "arguments" : { "type" : "array", - "description" : "List of YAML (or tagged expression) values to use as arguments to the function." + "description" : "List of YAML (or tagged expression) values to use as arguments to the function.", + "items" : { + "$ref" : "#/definitions/yamlValueOrPythonExpression" + } } } } ] }, - "advancedValueOutputChannel" : { - "anyOf" : [ - {}, + "programmingLanguage" : { + "type" : "string", + "description" : "One of the programming languages supported by TESTed.", + "enum" : [ + "bash", + "c", + "haskell", + "java", + "javascript", + "kotlin", + "python", + "runhaskell", + "csharp" + ] + }, + "message": { + "oneOf": [ { - "type" : "string", - "description" : "A 'Python' value to parse and use as the expected type." + "type": "string", + "description" : "A simple message to display." }, { - "type" : "object", - "unevaluatedProperties" : false, - "description" : "A custom check function.", + "type": "object", "required" : [ - "value" + "description" ], "properties" : { - "value" : { + "description": { "type" : "string", - "description" : "The expected value." - } - }, - "oneOf" : [ - { - "properties" : { - "oracle" : { - "type" : "string", - "enum" : [ - "builtin" - ] - } - } + "description" : "The message to display." }, - { - "required" : [ - "oracle", - "file" - ], - "properties" : { - "oracle" : { - "type" : "string", - "enum" : [ - "custom_check" - ] - }, - "file" : { - "type" : "string", - "description" : "The path to the file containing the custom check function." - }, - "name" : { - "type" : "string", - "description" : "The name of the custom check function.", - "default" : "evaluate" - }, - "arguments" : { - "type" : "array", - "description" : "List of YAML (or tagged expression) values to use as arguments to the function." - } - } + "format": { + "type" : "string", + "default" : "text", + "description" : "The format of the message, either a programming language, 'text' or 'html'." 
} - ] + } } ] }, @@ -442,7 +634,6 @@ "type" : "object", "description" : "Configuration properties for textual comparison and to configure if the expected value should be hidden or not", "minProperties" : 1, - "unevaluatedProperties" : false, "properties" : { "applyRounding" : { "description" : "Apply rounding when comparing as float", @@ -474,61 +665,17 @@ } } }, - "file" : { - "type" : "object", - "description" : "Path to a file for input.", - "unevaluatedProperties" : false, - "required" : [ - "name", - "url" - ], - "properties" : { - "name" : { - "type" : "string", - "description" : "File name" - }, - "url" : { - "type" : "string", - "format" : "uri", - "description" : "Relative path to the file in the `description` folder of a Dodona exercise" - } - } - }, - "supportedLanguage" : { - "type" : "string", - "enum" : [ - "bash", - "c", - "haskell", - "java", - "javascript", - "kotlin", - "python", - "runhaskell", - "csharp" + "textualType": { + "description" : "Simple textual value, converted to string.", + "type" : [ + "string", + "number", + "integer", + "boolean" ] }, - "message": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "object", - "required" : [ - "description" - ], - "properties" : { - "description": { - "type" : "string" - }, - "format": { - "type" : "string", - "default" : "text" - } - } - } - ] + "yamlValue": { + "description" : "A value represented as YAML." } } } diff --git a/tested/dsl/translate_parser.py b/tested/dsl/translate_parser.py index b637969a..51e69b69 100644 --- a/tested/dsl/translate_parser.py +++ b/tested/dsl/translate_parser.py @@ -4,12 +4,15 @@ from collections.abc import Callable from decimal import Decimal from pathlib import Path -from typing import Any, Literal, TypeVar, cast +from typing import Any, Literal, Type, TypeVar, cast import yaml from attrs import define, evolve +from jsonschema import TypeChecker from jsonschema.exceptions import ValidationError -from jsonschema.validators import Draft201909Validator +from jsonschema.protocols import Validator +from jsonschema.validators import extend as extend_validator +from jsonschema.validators import validator_for from tested.datatypes import ( AdvancedNumericTypes, @@ -74,14 +77,12 @@ class TestedType: type: str | AllTypes -@define -class ExpressionString: - expression: str +class ExpressionString(str): + pass -@define -class ReturnOracle: - value: YamlDict +class ReturnOracle(dict): + pass OptionDict = dict[str, int | bool] @@ -117,7 +118,7 @@ def _custom_type_constructors(loader: yaml.Loader, node: yaml.Node) -> TestedTyp def _expression_string(loader: yaml.Loader, node: yaml.Node) -> ExpressionString: result = _parse_yaml_value(loader, node) assert isinstance(result, str), f"An expression must be a string, got {result}" - return ExpressionString(expression=result) + return ExpressionString(result) def _return_oracle(loader: yaml.Loader, node: yaml.Node) -> ReturnOracle: @@ -125,7 +126,7 @@ def _return_oracle(loader: yaml.Loader, node: yaml.Node) -> ReturnOracle: assert isinstance( result, dict ), f"A custom oracle must be an object, got {result} which is a {type(result)}." 
-    return ReturnOracle(value=result)
+    return ReturnOracle(result)
 
 
 def _parse_yaml(yaml_stream: str) -> YamlObject:
@@ -169,18 +170,42 @@ def _parse_yaml(yaml_stream: str) -> YamlObject:
         raise exc
 
 
-def _load_schema_validator():
+def is_oracle(_checker: TypeChecker, instance: Any) -> bool:
+    return isinstance(instance, ReturnOracle)
+
+
+def is_expression(_checker: TypeChecker, instance: Any) -> bool:
+    return isinstance(instance, ExpressionString)
+
+
+def test(value: object) -> bool:
+    if not isinstance(value, str):
+        return False
+    import ast
+
+    ast.parse(value)
+    return True
+
+
+def load_schema_validator(file: str = "schema-strict.json") -> Validator:
     """
     Load the JSON Schema validator used to check DSL test suites.
     """
-    path_to_schema = Path(__file__).parent / "schema.json"
+    path_to_schema = Path(__file__).parent / file
     with open(path_to_schema, "r") as schema_file:
         schema_object = json.load(schema_file)
-    Draft201909Validator.check_schema(schema_object)
-    return Draft201909Validator(schema_object)
+
+    original_validator: Type[Validator] = validator_for(schema_object)
+    type_checker = original_validator.TYPE_CHECKER.redefine(
+        "oracle", is_oracle
+    ).redefine("expression", is_expression)
+    format_checker = original_validator.FORMAT_CHECKER
+    format_checker.checks("tested-dsl-expression", SyntaxError)(test)
+    tested_validator = extend_validator(original_validator, type_checker=type_checker)
+    return tested_validator(schema_object, format_checker=format_checker)
 
 
-_SCHEMA_VALIDATOR = _load_schema_validator()
+_SCHEMA_VALIDATOR = load_schema_validator()
 
 
 class DslValidationError(ValueError):
@@ -227,7 +252,7 @@ def convert_validation_error_to_group(
     if not error.context and not error.cause:
         if len(error.message) > 150:
             message = error.message.replace(str(error.instance), "")
-            note = "With being: " + str(error.instance)
+            note = "With being: " + textwrap.shorten(str(error.instance), 500)
         else:
             message = error.message
             note = None
@@ -382,7 +407,7 @@ def _convert_text_output_channel(stream: YamlObject) -> TextOutputChannel:
 def _convert_yaml_value(stream: YamlObject) -> Value | None:
     if isinstance(stream, ExpressionString):
         # We have an expression string.
-        value = parse_string(stream.expression, is_return=True)
+        value = parse_string(stream, is_return=True)
     elif isinstance(stream, (int, float, bool, TestedType, list, set, str, dict)):
         # Simple values where no confusion is possible.
         value = _convert_value(stream)
@@ -396,7 +421,7 @@ def _convert_advanced_value_output_channel(stream: YamlObject) -> ValueOutputChannel:
     if isinstance(stream, ReturnOracle):
-        return_object = stream.value
+        return_object = stream
         value = _convert_yaml_value(return_object["value"])
         assert isinstance(value, Value), "You must specify a value for a return oracle."
if "oracle" not in return_object or return_object["oracle"] == "builtin": diff --git a/tested/features.py b/tested/features.py index 9109e23d..9b3006a9 100644 --- a/tested/features.py +++ b/tested/features.py @@ -23,7 +23,7 @@ from tested.dodona import ExtendedMessage, Message, Permission if TYPE_CHECKING: - from tested.languages.config import Language + from tested.languages.language import Language _logger = logging.getLogger(__name__) diff --git a/tested/internationalization/nl.yaml b/tested/internationalization/nl.yaml index 6d6fcebf..13ee515e 100644 --- a/tested/internationalization/nl.yaml +++ b/tested/internationalization/nl.yaml @@ -16,13 +16,11 @@ nl: runtime: "Runtime error" unexpected: "Onverwachte uitvoer" programmed: - student: - default: >- - Er ging iets fout op bij het evalueren van de oplossing. - Meld dit aan de lesgever! - result: "Het resultaat van de geprogrammeerde evaluatie is ongeldig:" - stdout: "Dit werd geproduceerd op stdout:" - stderr: "Dit werd geproduceerd op stderr:" + student: >- + Er ging iets fout op bij het evalueren van de oplossing. + Meld dit aan de lesgever! + stdout: "Het evalueren van de oplossing genereerde deze uitvoer op stderr:" + stderr: "Het evalueren van de oplossing genereerde deze uitvoer op stdout:" specific: student: default: >- diff --git a/tested/judge/compilation.py b/tested/judge/compilation.py index b9367a5c..b079285e 100644 --- a/tested/judge/compilation.py +++ b/tested/judge/compilation.py @@ -15,7 +15,7 @@ filter_files, run_command, ) -from tested.languages.config import FileFilter, Language +from tested.languages.language import FileFilter, Language from tested.languages.utils import convert_stacktrace_to_clickable_feedback _logger = logging.getLogger(__name__) diff --git a/tested/judge/planning.py b/tested/judge/planning.py index 9c22d8d5..827d5385 100644 --- a/tested/judge/planning.py +++ b/tested/judge/planning.py @@ -11,8 +11,8 @@ from tested.configs import Bundle from tested.dodona import AnnotateCode, Message, Status -from tested.languages.config import FileFilter from tested.languages.conventionalize import execution_name +from tested.languages.language import FileFilter from tested.testsuite import Context, EmptyChannel, MainInput diff --git a/tested/judge/utils.py b/tested/judge/utils.py index 1daedef0..c50a00b2 100644 --- a/tested/judge/utils.py +++ b/tested/judge/utils.py @@ -10,8 +10,8 @@ from attrs import define from tested.configs import Bundle -from tested.languages.config import FileFilter from tested.languages.conventionalize import EXECUTION_PREFIX +from tested.languages.language import FileFilter _logger = logging.getLogger(__name__) diff --git a/tested/languages/__init__.py b/tested/languages/__init__.py index 77c7cd1f..a045fb26 100644 --- a/tested/languages/__init__.py +++ b/tested/languages/__init__.py @@ -11,12 +11,12 @@ from tested.languages.bash.config import Bash from tested.languages.c.config import C -from tested.languages.config import Language from tested.languages.csharp.config import CSharp from tested.languages.haskell.config import Haskell from tested.languages.java.config import Java from tested.languages.javascript.config import JavaScript from tested.languages.kotlin.config import Kotlin +from tested.languages.language import Language from tested.languages.python.config import Python from tested.languages.runhaskell.config import RunHaskell diff --git a/tested/languages/bash/config.py b/tested/languages/bash/config.py index 799bab5f..787ea82e 100644 --- a/tested/languages/bash/config.py +++ 
b/tested/languages/bash/config.py @@ -5,12 +5,6 @@ from tested.datatypes import AdvancedStringTypes, AllTypes, BasicStringTypes from tested.dodona import AnnotateCode, Message from tested.features import Construct, TypeSupport -from tested.languages.config import ( - CallbackResult, - Command, - Language, - TypeDeclarationMetadata, -) from tested.languages.conventionalize import ( EXECUTION_PREFIX, Conventionable, @@ -18,6 +12,12 @@ submission_file, submission_name, ) +from tested.languages.language import ( + CallbackResult, + Command, + Language, + TypeDeclarationMetadata, +) from tested.serialisation import Statement, Value if TYPE_CHECKING: diff --git a/tested/languages/c/config.py b/tested/languages/c/config.py index fb71063e..cb7c8e6a 100644 --- a/tested/languages/c/config.py +++ b/tested/languages/c/config.py @@ -6,18 +6,18 @@ from tested.datatypes import AllTypes from tested.dodona import AnnotateCode, Message from tested.features import Construct, TypeSupport -from tested.languages.config import ( - CallbackResult, - Command, - Language, - TypeDeclarationMetadata, -) from tested.languages.conventionalize import ( EXECUTION_PREFIX, Conventionable, NamingConventions, submission_file, ) +from tested.languages.language import ( + CallbackResult, + Command, + Language, + TypeDeclarationMetadata, +) from tested.languages.utils import executable_name from tested.serialisation import Statement, Value diff --git a/tested/languages/csharp/config.py b/tested/languages/csharp/config.py index 4ee7a374..1f97335c 100644 --- a/tested/languages/csharp/config.py +++ b/tested/languages/csharp/config.py @@ -6,12 +6,6 @@ from tested.datatypes import AllTypes from tested.dodona import AnnotateCode, Message, Status from tested.features import Construct, TypeSupport -from tested.languages.config import ( - CallbackResult, - Command, - Language, - TypeDeclarationMetadata, -) from tested.languages.conventionalize import ( EXECUTION_PREFIX, Conventionable, @@ -20,6 +14,12 @@ submission_file, submission_name, ) +from tested.languages.language import ( + CallbackResult, + Command, + Language, + TypeDeclarationMetadata, +) from tested.serialisation import Statement, Value logger = logging.getLogger(__name__) diff --git a/tested/languages/haskell/config.py b/tested/languages/haskell/config.py index 524264ac..7b45168d 100644 --- a/tested/languages/haskell/config.py +++ b/tested/languages/haskell/config.py @@ -5,17 +5,17 @@ from tested.datatypes import AllTypes from tested.dodona import AnnotateCode, Message from tested.features import Construct, TypeSupport -from tested.languages.config import ( - CallbackResult, - Command, - Language, - TypeDeclarationMetadata, -) from tested.languages.conventionalize import ( Conventionable, NamingConventions, submission_file, ) +from tested.languages.language import ( + CallbackResult, + Command, + Language, + TypeDeclarationMetadata, +) from tested.languages.utils import ( cleanup_description, executable_name, diff --git a/tested/languages/java/config.py b/tested/languages/java/config.py index 798d4974..72c08538 100644 --- a/tested/languages/java/config.py +++ b/tested/languages/java/config.py @@ -11,17 +11,17 @@ ) from tested.dodona import AnnotateCode, Message from tested.features import Construct, TypeSupport -from tested.languages.config import ( - CallbackResult, - Command, - Language, - TypeDeclarationMetadata, -) from tested.languages.conventionalize import ( Conventionable, NamingConventions, submission_file, ) +from tested.languages.language import ( + 
CallbackResult, + Command, + Language, + TypeDeclarationMetadata, +) from tested.languages.utils import jvm_cleanup_stacktrace, jvm_memory_limit from tested.serialisation import Statement, Value diff --git a/tested/languages/javascript/config.py b/tested/languages/javascript/config.py index cecfe55f..c9eb9a3b 100644 --- a/tested/languages/javascript/config.py +++ b/tested/languages/javascript/config.py @@ -11,12 +11,6 @@ ) from tested.dodona import AnnotateCode, Message from tested.features import Construct, TypeSupport -from tested.languages.config import ( - CallbackResult, - Command, - Language, - TypeDeclarationMetadata, -) from tested.languages.conventionalize import ( EXECUTION_PREFIX, Conventionable, @@ -24,6 +18,12 @@ submission_file, submission_name, ) +from tested.languages.language import ( + CallbackResult, + Command, + Language, + TypeDeclarationMetadata, +) from tested.languages.utils import cleanup_description from tested.serialisation import Statement, Value diff --git a/tested/languages/kotlin/config.py b/tested/languages/kotlin/config.py index 36380997..4610fd3e 100644 --- a/tested/languages/kotlin/config.py +++ b/tested/languages/kotlin/config.py @@ -12,12 +12,6 @@ ) from tested.dodona import AnnotateCode, Message, Status from tested.features import Construct, TypeSupport -from tested.languages.config import ( - CallbackResult, - Command, - Language, - TypeDeclarationMetadata, -) from tested.languages.conventionalize import ( EXECUTION_PREFIX, Conventionable, @@ -25,6 +19,12 @@ conventionalize_namespace, submission_file, ) +from tested.languages.language import ( + CallbackResult, + Command, + Language, + TypeDeclarationMetadata, +) from tested.languages.utils import jvm_cleanup_stacktrace, jvm_memory_limit from tested.serialisation import Statement, Value diff --git a/tested/languages/config.py b/tested/languages/language.py similarity index 97% rename from tested/languages/config.py rename to tested/languages/language.py index 9a559bb0..f78eb4ba 100644 --- a/tested/languages/config.py +++ b/tested/languages/language.py @@ -6,11 +6,11 @@ """ import logging -import typing from abc import ABC, abstractmethod +from collections import defaultdict from collections.abc import Callable from pathlib import Path -from typing import NotRequired, Optional, TypedDict +from typing import TYPE_CHECKING, NotRequired, Optional, TypedDict from tested.datatypes import AllTypes, ExpressionTypes from tested.dodona import AnnotateCode, Message, Status @@ -23,8 +23,9 @@ submission_name, ) from tested.serialisation import Statement, Value +from tested.testsuite import SupportedLanguage -if typing.TYPE_CHECKING: +if TYPE_CHECKING: from tested.configs import GlobalConfig from tested.languages.generation import PreparedExecutionUnit @@ -35,6 +36,11 @@ _logger = logging.getLogger(__name__) +STRING_QUOTES: dict[SupportedLanguage, str] = defaultdict( + lambda: '"', {SupportedLanguage.PYTHON: "'", SupportedLanguage.BASH: "'"} +) + + class TypeDeclarationMetadata(TypedDict): names: dict[AllTypes, str | tuple[bool, str]] inner_names: NotRequired[dict[AllTypes, str]] @@ -161,9 +167,10 @@ def execution(self, cwd: Path, file: str, arguments: list[str]) -> Command: def get_string_quote(self) -> str: """ - :return: The symbol used to quote strings. + :return: The quote symbol used to quote strings. 
""" - return '"' + assert self.config + return STRING_QUOTES[self.config.dodona.programming_language] @abstractmethod def naming_conventions(self) -> dict[Conventionable, NamingConventions]: diff --git a/tested/languages/preparation.py b/tested/languages/preparation.py index a29530c2..3521c6b6 100644 --- a/tested/languages/preparation.py +++ b/tested/languages/preparation.py @@ -51,7 +51,7 @@ if TYPE_CHECKING: from tested.judge.planning import PlannedExecutionUnit - from tested.languages.config import Language + from tested.languages.language import Language # Names of the predefined functions that must be available. SEND_VALUE = "send_value" diff --git a/tested/languages/python/config.py b/tested/languages/python/config.py index bf422c43..cb495054 100644 --- a/tested/languages/python/config.py +++ b/tested/languages/python/config.py @@ -12,17 +12,17 @@ ) from tested.dodona import AnnotateCode, Message, Severity from tested.features import Construct, TypeSupport -from tested.languages.config import ( - CallbackResult, - Command, - Language, - TypeDeclarationMetadata, -) from tested.languages.conventionalize import ( Conventionable, NamingConventions, submission_file, ) +from tested.languages.language import ( + CallbackResult, + Command, + Language, + TypeDeclarationMetadata, +) from tested.serialisation import Statement, Value if TYPE_CHECKING: @@ -51,9 +51,6 @@ def supports_debug_information(self) -> bool: def file_extension(self) -> str: return "py" - def get_string_quote(self) -> str: - return "'" - def naming_conventions(self) -> dict[Conventionable, NamingConventions]: return {"class": "pascal_case", "global_identifier": "macro_case"} diff --git a/tested/languages/runhaskell/config.py b/tested/languages/runhaskell/config.py index b0cfebb5..69d9b174 100644 --- a/tested/languages/runhaskell/config.py +++ b/tested/languages/runhaskell/config.py @@ -1,8 +1,8 @@ from pathlib import Path -from tested.languages.config import CallbackResult, Command from tested.languages.conventionalize import submission_file from tested.languages.haskell.config import Haskell +from tested.languages.language import CallbackResult, Command class RunHaskell(Haskell): diff --git a/tested/languages/utils.py b/tested/languages/utils.py index c62fd116..3ecfd2c4 100644 --- a/tested/languages/utils.py +++ b/tested/languages/utils.py @@ -13,7 +13,7 @@ from tested.serialisation import FunctionCall, StringType if TYPE_CHECKING: - from tested.languages.config import Language + from tested.languages.language import Language _logger = logging.getLogger(__name__) code_regex = re.compile(r":(\d+)") diff --git a/tested/oracles/programmed.py b/tested/oracles/programmed.py index bee5e0e3..938a7c2d 100644 --- a/tested/oracles/programmed.py +++ b/tested/oracles/programmed.py @@ -68,19 +68,24 @@ def _catch_output() -> Generator[tuple[StringIO, StringIO], None, None]: sys.stderr = old_stderr -def _evaluate_programmed( - bundle: Bundle, - oracle: CustomCheckOracle, - context: OracleContext, -) -> BaseExecutionResult | BooleanEvalResult: - """ - Run the custom evaluation. Concerning structure and execution, the custom - oracle is very similar to the execution of the whole evaluation. It a - mini-evaluation if you will. +def _execute_custom_check_function( + bundle: Bundle, oracle: CustomCheckOracle, context: OracleContext +): """ - _logger.debug("Doing evaluation in Python mode.") + Execute a custom check function, returning the captured stdout and stderr if + the execution got to that point. 
-    # Create a configs bundle for the language of the oracle.
+    This function will throw various errors, depending on where in the process it
+    might fail. For example, invalid syntax will result in SyntaxErrors, but all
+    exceptions raised by the custom oracles also need to be caught.
+
+    :param bundle: The bundle of the original execution.
+    :param oracle: The oracle that is executing.
+    :param context: The context of said oracle.
+
+    :return: A tuple with (result, stdout, stderr), but all can be None.
+    """
+    # Create a config bundle for Python, the programming language of the oracle.
     eval_bundle = create_bundle(bundle.config, bundle.out, bundle.suite, "python")
 
     # Path to the oracle.
@@ -102,62 +107,86 @@
         "__tested_context__": ConvertedOracleContext.from_context(eval_bundle, context),
     }
     exec("import sys\n" "sys.modules['evaluation_utils'] = __tested_test__", global_env)
-    # Make the oracle available.
+
+    # Make the oracle available. This will fail on syntax errors.
     exec(evaluator_code, global_env)
 
-    # Since we pass a class value, we don't want to
+    # Create the function we will call.
     check_function_call = FunctionCall(
         type=FunctionType.FUNCTION,
         name=oracle.function.name,
         arguments=[Identifier("__tested_context__"), *oracle.arguments],
     )
+    # The actual code for calling the function.
     literal_function_call = generate_statement(eval_bundle, check_function_call)
 
+    # Call the function while intercepting all output.
     with _catch_output() as (stdout_, stderr_):
         exec(f"__tested_test__result = {literal_function_call}", global_env)
-
+    result_ = cast(BooleanEvalResult | None, global_env["__tested_test__result"])
     stdout_ = stdout_.getvalue()
     stderr_ = stderr_.getvalue()
 
+    return result_, stdout_, stderr_
+
+
+def _evaluate_programmed(
+    bundle: Bundle,
+    oracle: CustomCheckOracle,
+    context: OracleContext,
+) -> BaseExecutionResult | BooleanEvalResult:
+    """
+    Run the custom evaluation. This will call a function to do the execution, but
+    mainly provides error handling.
+    """
+
+    result_ = None
+    stdout_ = None
+    stderr_ = None
     messages = []
-    if stdout_:
+    try:
+        result_, stdout_, stderr_ = _execute_custom_check_function(
+            bundle, oracle, context
+        )
+    except SyntaxError as e:
+        # The oracle might be rubbish, so handle any exception.
+        _logger.exception(e)
         messages.append(
             ExtendedMessage(
-                description=get_i18n_string("judge.programmed.produced.stdout"),
+                description="The custom check oracle failed with the following syntax error:",
                 format="text",
+                permission=Permission.STAFF,
             )
         )
-        messages.append(ExtendedMessage(description=stdout_, format="code"))
-    if stderr_:
+        tb = traceback.format_exc()
+        messages.append(
+            ExtendedMessage(description=tb, format="code", permission=Permission.STAFF)
+        )
+    except Exception as e:
+        _logger.exception(e)
         messages.append(
             ExtendedMessage(
-                description=get_i18n_string("judge.programmed.produced.stderr"),
+                description="The custom check oracle failed with the following exception:",
                 format="text",
-                permission=Permission.STUDENT,
+                permission=Permission.STAFF,
             )
         )
+        tb = traceback.format_exc()
         messages.append(
-            ExtendedMessage(
-                description=stderr_, format="code", permission=Permission.STAFF
-            )
+            ExtendedMessage(description=tb, format="code", permission=Permission.STAFF)
         )
-    result_ = cast(BooleanEvalResult | None, global_env["__tested_test__result"])
+    if stdout_:
+        messages.append(get_i18n_string("judge.programmed.produced.stdout"))
+        messages.append(ExtendedMessage(description=stdout_, format="code"))
+    if stderr_:
+        messages.append(get_i18n_string("judge.programmed.produced.stderr"))
+        messages.append(ExtendedMessage(description=stderr_, format="code"))
 
     # If the result is None, the oracle is broken.
     if result_ is None:
-        messages.append(
-            ExtendedMessage(
-                description=get_i18n_string("judge.programmed.student"), format="text"
-            )
-        )
-        messages.append(
-            ExtendedMessage(
-                description=get_i18n_string("judge.programmed.failed"),
-                format="text",
-                permission=Permission.STAFF,
-            )
-        )
+        messages.append(get_i18n_string("judge.programmed.student"))
+        messages.append("The custom check oracle did not produce a valid return value.")
         return BooleanEvalResult(
             result=Status.INTERNAL_ERROR,
             readable_expected=None,
diff --git a/tests/exercises/division/evaluation/Evaluator.cs b/tests/exercises/division/evaluation/Evaluator.cs
index 1e6508b1..932ad9d3 100644
--- a/tests/exercises/division/evaluation/Evaluator.cs
+++ b/tests/exercises/division/evaluation/Evaluator.cs
@@ -10,4 +10,8 @@ public static EvaluationResult Evaluate(object? actual) {
             return new EvaluationResult(false, "System.DivideByZeroException", actual == null ? "" : actual.ToString(), messages);
         }
     }
+
+    public static EvaluationResult Runtime(object?
actual) { + throw new ArgumentOutOfRangeException("hello"); + } } diff --git a/tests/exercises/division/evaluation/Evaluator.hs b/tests/exercises/division/evaluation/Evaluator.hs index 613eeb0f..95340379 100644 --- a/tests/exercises/division/evaluation/Evaluator.hs +++ b/tests/exercises/division/evaluation/Evaluator.hs @@ -20,6 +20,14 @@ evaluate (Just x) = } +runtime :: Maybe (SomeException) -> EvaluationResult +runtime _ = evaluationResult { + readableExpected = Just $ show DivideByZero, + readableActual = Just $ show (100 `div` 0), + messages = [message "Expected DivideByZero, got nothing."] + } + + handleA :: ArithException -> EvaluationResult handleA DivideByZero = evaluationResult { result = True, @@ -30,4 +38,4 @@ handleA other = evaluationResult { readableExpected = Just $ show DivideByZero, readableActual = Just $ show other, messages = [message "Expected DivideByZero, got something else."] - } \ No newline at end of file + } diff --git a/tests/exercises/division/evaluation/Evaluator.java b/tests/exercises/division/evaluation/Evaluator.java index d0c87408..8a12ad75 100644 --- a/tests/exercises/division/evaluation/Evaluator.java +++ b/tests/exercises/division/evaluation/Evaluator.java @@ -14,4 +14,8 @@ public static EvaluationResult evaluate(Object actual) { } } -} \ No newline at end of file + public static EvaluationResult runtime(Object actual) { + throw new RuntimeException("Something went wrong!"); + } + +} diff --git a/tests/exercises/division/evaluation/Evaluator.kt b/tests/exercises/division/evaluation/Evaluator.kt index 62ab6c7c..10a2c25b 100644 --- a/tests/exercises/division/evaluation/Evaluator.kt +++ b/tests/exercises/division/evaluation/Evaluator.kt @@ -14,5 +14,10 @@ class Evaluator { .build() } } + + @JvmStatic + fun runtime(actual: Any?): EvaluationResult { + throw Exception("Hi There!") + } } -} \ No newline at end of file +} diff --git a/tests/exercises/division/evaluation/EvaluatorSyntaxError.cs b/tests/exercises/division/evaluation/EvaluatorSyntaxError.cs new file mode 100644 index 00000000..3ecbd3a8 --- /dev/null +++ b/tests/exercises/division/evaluation/EvaluatorSyntaxError.cs @@ -0,0 +1,9 @@ +using System; +using Tested; +a +public class Evaluator { zeffa ff v + public static E vxwvsages = new List() { new Message("Expected DivideByZeroException, got something else.") }; + return new EvaluationResult(false, "System.DivideByZeroException", actual == null ? 
"" : actual.ToString(), messages); + } + } +}vv qega diff --git a/tests/exercises/division/evaluation/EvaluatorSyntaxError.hs b/tests/exercises/division/evaluation/EvaluatorSyntaxError.hs new file mode 100644 index 00000000..c6fa7935 --- /dev/null +++ b/tests/exercises/division/evaluation/EvaluatorSyntaxError.hs @@ -0,0 +1,37 @@ +{-# LANGUAGE ScopedTypeVariables #-} +module Evaluator where + +import EvaluationUtils +import Control.Exception + +evaluate :: Maybe (SomeException) -> EvaluationResult +evaluate Nothing = evaluationResult { + readableExpected = Just $ show DivideByZero, + readableActual = Just "", + messages = [message "Expected DivideByZero, got nothing."] + } +evaluate (Just x) = + case fromException x off aegaeglho hapyat²uùµajµjoµjµ µg jùµj ùtjùpµtjùpjµj(&µj µjµajtpµj + + egkzmknzk oih + gcd + + + + + + zgg[message "Expected DivideByZero, got nothing."] + } + + +handleA :: ArithException -> EvaluationResult +handleA DivideByZero = evaluationResult { + result = True, + readableExpected = Just $ show DivideByZero, + readableActual = Just $ show DivideByZero + } +handleA other = evaluationResult { + readableExpected = Just $ show DivideByZero, + readableActual = Just $ show other, + messages = [message "Expected DivideByZero, got something else."] + } diff --git a/tests/exercises/division/evaluation/EvaluatorSyntaxError.java b/tests/exercises/division/evaluation/EvaluatorSyntaxError.java new file mode 100644 index 00000000..f41a093b --- /dev/null +++ b/tests/exercises/division/evaluation/EvaluatorSyntaxError.java @@ -0,0 +1,15 @@ +public class Evaluator { egqdg sd + public static EvaluationResult evaluate(Object actual) { + if (actual instanceof ArithmeticException) { + return EvaluationResuvd lt.builder(true) + .withReadableExpected(actual.toString()) + .withReadableActual(actual.toString()) + .build(); + } else { + return EvaluationResusdlt.builder(false) + .withReadableExpected("ArithmeticException") + .withReadableActual(actual == null ? "" : actual.toString()) + .withMessage(nbsd + } + +} sbsdgssdé§u u diff --git a/tests/exercises/division/evaluation/EvaluatorSyntaxError.kt b/tests/exercises/division/evaluation/EvaluatorSyntaxError.kt new file mode 100644 index 00000000..b065b225 --- /dev/null +++ b/tests/exercises/division/evaluation/EvaluatorSyntaxError.kt @@ -0,0 +1,16 @@ +class Evaluator { ae + companion object {t "t"&t + @JvmStatic zfz"r'" '" ' + fun evaluate(actual: Any?): EvaluationResult { + return if (actual is ArithmeticException) { + EvaluationResult.Builder(result = true, + readableExpected = actual.toString(), + aeg readableActual = actual.toString()).build() + } else { + EvalugtionResult.Builder(result = false, + readableExpected = "ArithmeticException", + readableActual = actual?.toString() ?: "") + .withMessage(EvaluationResult.Message("Expected ArithmeticException, got something else.")) + .build() + qg +}qd qsdvdvqd diff --git a/tests/exercises/division/evaluation/evaluator-syntax-error.py b/tests/exercises/division/evaluation/evaluator-syntax-error.py new file mode 100644 index 00000000..4accc118 --- /dev/null +++ b/tests/exercises/division/evaluation/evaluator-syntax-error.py @@ -0,0 +1,18 @@ + jdhbkd mbzough import traceback + +from evaluation_utils import EvaluationResult, Message + + +def evaluate(value): + if isinstance(value, ZeroDivisionError): + # If a zero division error, show the stacktrace. 
+ formatted = "".join(traceback.format_exception(type(value), value, value.__traceback__)) + return EvaluationResult(True, formatted, formatted) + elif isinstance(value, Exception): + # If another error, show the stacktrace as well. + formatted = "".join(traceback.format_exception(type(value), value, value.__traceback__)) + return EvaluationResult(False, "ZeroDivisionError", formatted, [Message(f"Verwachtte een ZeroDivisionError, maar kreeg een {type(value).__name__}.")]) + else: + # Else show the str of the value. + actual = str(value) if value else "" + return EvaluationResult(False, "ZeroDivisionError", actual, [Message("Verwachtte een ZeroDivisionError.")]) diff --git a/tests/exercises/division/evaluation/evaluator.py b/tests/exercises/division/evaluation/evaluator.py index 51bfddc7..58832e1e 100644 --- a/tests/exercises/division/evaluation/evaluator.py +++ b/tests/exercises/division/evaluation/evaluator.py @@ -16,3 +16,7 @@ def evaluate(value): # Else show the str of the value. actual = str(value) if value else "" return EvaluationResult(False, "ZeroDivisionError", actual, [Message("Verwachtte een ZeroDivisionError.")]) + + +def runtime(_value): + raise ValueError("Hallo") diff --git a/tests/exercises/division/evaluation/plan-runtime-exception.json b/tests/exercises/division/evaluation/plan-runtime-exception.json new file mode 100644 index 00000000..506b5dee --- /dev/null +++ b/tests/exercises/division/evaluation/plan-runtime-exception.json @@ -0,0 +1,50 @@ +{ + "tabs": [ + { + "name": "Feedback", + "runs": [ + { + "run": { + "description": "Uitvoeren code", + "input": { + "main_call": true + }, + "output": { + "exception": { + "evaluator": { + "type": "specific", + "evaluators": { + "python": { + "file": "evaluator.py", + "name": "runtime" + }, + "java": { + "file": "Evaluator.java", + "name": "runtime" + }, + "kotlin": { + "file": "Evaluator.kt", + "name": "runtime" + }, + "haskell": { + "file": "Evaluator.hs", + "name": "runtime" + }, + "runhaskell": { + "file": "Evaluator.hs", + "name": "runtime" + }, + "csharp": { + "file": "Evaluator.cs", + "name": "runtime" + } + } + } + } + } + } + } + ] + } + ] +} diff --git a/tests/exercises/division/evaluation/plan-syntax-error.json b/tests/exercises/division/evaluation/plan-syntax-error.json new file mode 100644 index 00000000..43e916ad --- /dev/null +++ b/tests/exercises/division/evaluation/plan-syntax-error.json @@ -0,0 +1,44 @@ +{ + "tabs": [ + { + "name": "Feedback", + "runs": [ + { + "run": { + "description": "Uitvoeren code", + "input": { + "main_call": true + }, + "output": { + "exception": { + "evaluator": { + "type": "specific", + "evaluators": { + "python": { + "file": "evaluator-syntax-error.py" + }, + "java": { + "file": "EvaluatorSyntaxError.java" + }, + "kotlin": { + "file": "EvaluatorSyntaxError.kt" + }, + "haskell": { + "file": "EvaluatorSyntaxError.hs" + }, + "runhaskell": { + "file": "EvaluatorSyntaxError.hs" + }, + "csharp": { + "file": "EvaluatorSyntaxError.cs" + } + } + } + } + } + } + } + ] + } + ] +} diff --git a/tests/exercises/echo-function/evaluation/evaluator.py b/tests/exercises/echo-function/evaluation/evaluator.py index 02304728..453be3e4 100644 --- a/tests/exercises/echo-function/evaluation/evaluator.py +++ b/tests/exercises/echo-function/evaluation/evaluator.py @@ -18,3 +18,7 @@ def evaluate_value_dsl(context): dsl_expected="{5, 5}", dsl_actual="{4, 4}" ) + + +def evaluate_runtime_crash(context): + return len(context) / 0 diff --git a/tests/exercises/echo-function/evaluation/evaluator_syntax_error.py 
b/tests/exercises/echo-function/evaluation/evaluator_syntax_error.py new file mode 100644 index 00000000..1c9398e7 --- /dev/null +++ b/tests/exercises/echo-function/evaluation/evaluator_syntax_error.py @@ -0,0 +1,6 @@ +# noinspection PyUnresolvedReferences +from evaluation_utils import EvaluationResult, Message + + +evaluate(context): + return len(context) / 0 diff --git a/tests/exercises/echo-function/evaluation/programmed_crash.yaml b/tests/exercises/echo-function/evaluation/programmed_crash.yaml new file mode 100644 index 00000000..644d338a --- /dev/null +++ b/tests/exercises/echo-function/evaluation/programmed_crash.yaml @@ -0,0 +1,9 @@ +- tab: "My tab" + contexts: + - testcases: + - expression: 'echo("input-1")' + return: !oracle + oracle: "custom_check" + file: "evaluator.py" + name: "evaluate_runtime_crash" + value: "input-2" diff --git a/tests/exercises/echo-function/evaluation/programmed_missing.yaml b/tests/exercises/echo-function/evaluation/programmed_missing.yaml new file mode 100644 index 00000000..29e62b1a --- /dev/null +++ b/tests/exercises/echo-function/evaluation/programmed_missing.yaml @@ -0,0 +1,9 @@ +- tab: "My tab" + contexts: + - testcases: + - expression: 'echo("input-1")' + return: !oracle + oracle: "custom_check" + file: "evaluator_syntax_error.py" + name: "this_does_not_exist" + value: "input-2" diff --git a/tests/exercises/echo-function/evaluation/programmed_syntax_error.yaml b/tests/exercises/echo-function/evaluation/programmed_syntax_error.yaml new file mode 100644 index 00000000..2ff4d24f --- /dev/null +++ b/tests/exercises/echo-function/evaluation/programmed_syntax_error.yaml @@ -0,0 +1,9 @@ +- tab: "My tab" + contexts: + - testcases: + - expression: 'echo("input-1")' + return: !oracle + oracle: "custom_check" + file: "evaluator_syntax_error.py" + name: "evaluate" + value: "input-2" diff --git a/tests/language_markers.py b/tests/language_markers.py new file mode 100644 index 00000000..3a2acb76 --- /dev/null +++ b/tests/language_markers.py @@ -0,0 +1,21 @@ +from tested.languages import LANGUAGES + +COMPILE_LANGUAGES = [ + "python", + "java", + "c", + "kotlin", + "haskell", + "csharp", +] +ALL_SPECIFIC_LANGUAGES = COMPILE_LANGUAGES + [ + "javascript", + "runhaskell", +] +ALL_LANGUAGES = ALL_SPECIFIC_LANGUAGES + ["bash"] + +EXCEPTION_LANGUAGES = ["python", "java", "kotlin", "csharp", "haskell"] + + +def test_no_missing_languages_from_tests(): + assert sorted(ALL_LANGUAGES) == sorted(LANGUAGES.keys()) diff --git a/tests/manual_utils.py b/tests/manual_utils.py index afce524d..23282f4b 100644 --- a/tests/manual_utils.py +++ b/tests/manual_utils.py @@ -3,6 +3,7 @@ from io import StringIO from pathlib import Path +import pytest from jsonschema import validate from tested.cli import CommandDict, split_output @@ -13,8 +14,8 @@ from tested.utils import recursive_dict_merge -def assert_valid_output(output: str, config) -> CommandDict: - with open(Path(config.rootdir) / "tests/partial_output.json", "r") as f: +def assert_valid_output(output: str, config: pytest.Config) -> CommandDict: + with open(config.rootpath / "tests/partial_output.json", "r") as f: schema = json.load(f) updates = CommandDict() @@ -28,15 +29,15 @@ def assert_valid_output(output: str, config) -> CommandDict: def configuration( - config, + config: pytest.Config, exercise: str, language: str, work_dir: Path, suite: str = "plan.json", solution: str = "solution", - options=None, + options: dict | None = None, ) -> DodonaConfig: - exercise_dir = Path(config.rootdir) / "tests" / "exercises" + exercise_dir = 
config.rootpath / "tests" / "exercises" ep = exercise_dir / exercise return exercise_configuration( config, ep, language, work_dir, suite, solution, options @@ -44,7 +45,7 @@ def configuration( def exercise_configuration( - config, + config: pytest.Config, exercise_directory: Path, language: str, work_dir: Path, @@ -64,7 +65,7 @@ def exercise_configuration( "natural_language": "nl", "resources": exercise_directory / "evaluation", "source": exercise_directory / "solution" / f"{solution}.{ext}", - "judge": Path(f"{config.rootdir}"), + "judge": config.rootpath, "workdir": work_dir, "test_suite": suite, "options": {"linter": False}, diff --git a/tests/test_collector.py b/tests/test_collector.py index c905b18a..0976c092 100644 --- a/tests/test_collector.py +++ b/tests/test_collector.py @@ -1,6 +1,8 @@ from io import StringIO from pathlib import Path +import pytest + from tested.configs import create_bundle from tested.dodona import ( CloseContext, @@ -80,7 +82,7 @@ ) -def test_mid_judgement_is_completed(tmp_path: Path, pytestconfig): +def test_mid_judgement_is_completed(tmp_path: Path, pytestconfig: pytest.Config): conf = configuration(pytestconfig, "", SupportedLanguage.JAVASCRIPT, tmp_path) result = StringIO() bundle = create_bundle(conf, result, TEST_SUITE) @@ -127,7 +129,7 @@ def test_mid_judgement_is_completed(tmp_path: Path, pytestconfig): ] -def test_mid_context_is_completed(tmp_path: Path, pytestconfig): +def test_mid_context_is_completed(tmp_path: Path, pytestconfig: pytest.Config): conf = configuration(pytestconfig, "", SupportedLanguage.JAVASCRIPT, tmp_path) result = StringIO() bundle = create_bundle(conf, result, TEST_SUITE) @@ -158,7 +160,7 @@ def test_mid_context_is_completed(tmp_path: Path, pytestconfig): ] -def test_mid_tab_is_completed(tmp_path: Path, pytestconfig): +def test_mid_tab_is_completed(tmp_path: Path, pytestconfig: pytest.Config): conf = configuration(pytestconfig, "", SupportedLanguage.JAVASCRIPT, tmp_path) result = StringIO() bundle = create_bundle(conf, result, TEST_SUITE) diff --git a/tests/test_dsl_expression.py b/tests/test_dsl_expression.py index 2f8e89a3..88695f84 100644 --- a/tests/test_dsl_expression.py +++ b/tests/test_dsl_expression.py @@ -1,3 +1,5 @@ +# type: ignore[reportAttributeAccessIssue] + import math import pytest diff --git a/tests/test_dsl_yaml.py b/tests/test_dsl_yaml.py index 7e3137bb..441fa095 100644 --- a/tests/test_dsl_yaml.py +++ b/tests/test_dsl_yaml.py @@ -1,7 +1,9 @@ +# type: ignore[reportAttributeAccessIssue] import json from pathlib import Path import pytest +from jsonschema.validators import validator_for from tested.datatypes import ( AdvancedNumericTypes, @@ -18,6 +20,7 @@ StringTypes, ) from tested.dsl import parse_dsl, translate_to_test_suite +from tested.dsl.translate_parser import load_schema_validator from tested.serialisation import ( FunctionCall, NumberType, @@ -1100,3 +1103,20 @@ def test_empty_text_data_newlines(): suite = parse_test_suite(json_str) actual_stderr = suite.tabs[0].contexts[0].testcases[0].output.stderr.data assert actual_stderr == "" + + +def test_strict_json_schema_is_valid(): + path_to_schema = Path(__file__).parent / "tested-draft7.json" + with open(path_to_schema, "r") as schema_file: + schema_object = json.load(schema_file) + + validator = load_schema_validator() + meta_validator = validator_for(schema_object)(schema_object) + + meta_validator.validate(validator.schema) + + +def test_editor_json_schema_is_valid(): + validator = load_schema_validator("schema.json") + assert isinstance(validator.schema, 
dict) + validator.check_schema(validator.schema) diff --git a/tests/test_functionality.py b/tests/test_functionality.py index 3cf3b3c2..d1dcb24e 100644 --- a/tests/test_functionality.py +++ b/tests/test_functionality.py @@ -7,57 +7,27 @@ tests/) as the working directory. """ -import shutil import sys from pathlib import Path import pytest +from pytest_mock import MockerFixture from tested.configs import create_bundle -from tested.datatypes import BasicBooleanTypes, BasicNumericTypes, BasicStringTypes from tested.judge.execution import ExecutionResult from tested.languages import LANGUAGES -from tested.languages.conventionalize import submission_name -from tested.languages.generation import generate_statement, get_readable_input -from tested.serialisation import ( - BooleanType, - FunctionCall, - FunctionType, - NumberType, - StringType, -) +from tested.languages.generation import get_readable_input from tested.testsuite import Context, MainInput, Suite, Tab, Testcase, TextData +from tests.language_markers import ( + ALL_LANGUAGES, + ALL_SPECIFIC_LANGUAGES, + EXCEPTION_LANGUAGES, +) from tests.manual_utils import assert_valid_output, configuration, execute_config -COMPILE_LANGUAGES = [ - "python", - "java", - "c", - "kotlin", - pytest.param("haskell", marks=pytest.mark.haskell), - "csharp", -] -ALL_SPECIFIC_LANGUAGES = COMPILE_LANGUAGES + [ - "javascript", - pytest.param("runhaskell", marks=pytest.mark.haskell), -] -ALL_LANGUAGES = ALL_SPECIFIC_LANGUAGES + ["bash"] - -quotes = { - "python": "'", - "java": '"', - "c": '"', - "kotlin": '"', - "haskell": '"', - "javascript": '"', - "runhaskell": '"', - "bash": "'", - "csharp": '"', -} - @pytest.mark.parametrize("language", ALL_LANGUAGES) -def test_global_variable(language: str, tmp_path: Path, pytestconfig): +def test_global_variable(language: str, tmp_path: Path, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "global", language, tmp_path, "one.tson", "correct" ) @@ -67,266 +37,32 @@ def test_global_variable(language: str, tmp_path: Path, pytestconfig): @pytest.mark.parametrize("language", ALL_LANGUAGES) -def test_global_variable_yaml(language: str, tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, "global", language, tmp_path, "plan.yaml", "correct" - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] - - -@pytest.mark.parametrize("language", ALL_LANGUAGES) -def test_io_exercise(language: str, tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, "echo", language, tmp_path, "one.tson", "correct" - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] - - -@pytest.mark.parametrize("language", ALL_LANGUAGES) -def test_io_exercise_wrong(language: str, tmp_path: Path, pytestconfig): - conf = configuration(pytestconfig, "echo", language, tmp_path, "one.tson", "wrong") - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["wrong"] - - -@pytest.mark.parametrize("language", ALL_LANGUAGES) -def test_simple_programmed_eval(language: str, tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, - "echo", - language, - tmp_path, - "one-programmed-correct.tson", - "correct", - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] - - 
-@pytest.mark.parametrize("language", ALL_LANGUAGES) -def test_simple_programmed_eval_wrong(language: str, tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, "echo", language, tmp_path, "one-programmed-wrong.tson", "correct" - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["wrong"] - - -@pytest.mark.parametrize("language", ALL_LANGUAGES) -def test_io_function_exercise(language: str, tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, "echo-function", language, tmp_path, "one.tson", "correct" - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] - - -@pytest.mark.parametrize("language", ALL_LANGUAGES) -def test_io_function_file_exercise(language: str, tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, "echo-function-file", language, tmp_path, "one.tson", "correct" - ) - shutil.copytree( - Path(conf.resources).parent / "workdir", tmp_path, dirs_exist_ok=True - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] - - -@pytest.mark.parametrize("language", ALL_LANGUAGES) -def test_io_function_additional_source_files( - language: str, tmp_path: Path, pytestconfig +def test_global_variable_yaml( + language: str, tmp_path: Path, pytestconfig: pytest.Config ): conf = configuration( - pytestconfig, - "echo-function-additional-source-files", - language, - tmp_path, - "one.tson", - "correct", - ) - shutil.copytree( - Path(conf.resources).parent / "workdir", tmp_path, dirs_exist_ok=True - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] - - -@pytest.mark.parametrize("exercise", ["echo-function-file", "echo-function"]) -def test_javascript_async(exercise: str, tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, exercise, "javascript", tmp_path, "one.tson", "correct-async" - ) - workdir = Path(conf.resources).parent / "workdir" - if workdir.exists(): - shutil.copytree(workdir, tmp_path, dirs_exist_ok=True) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] - - -@pytest.mark.parametrize("language", ALL_LANGUAGES) -def test_io_function_escape_exercise(language: str, tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, "echo-function", language, tmp_path, "one-escape.tson", "correct" - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] - - -@pytest.mark.parametrize("language", ALL_LANGUAGES) -def test_io_function_display_multiline_exercise( - language: str, tmp_path: Path, pytestconfig -): - conf = configuration( - pytestconfig, - "echo-function", - language, - tmp_path, - "one-display-multiline.tson", - "correct", + pytestconfig, "global", language, tmp_path, "plan.yaml", "correct" ) result = execute_config(conf) updates = assert_valid_output(result, pytestconfig) assert updates.find_status_enum() == ["correct"] - start_test = updates.find_all("start-test") - close_test = updates.find_all("close-test") - assert 1 == len(start_test) - assert 1 == len(close_test) - assert "return" == start_test[0].get("channel", "") - expected, actual = start_test[0].get("expected", ""), close_test[0].get( - "generated", 
"" - ) - quote = quotes[language] - assert expected[0] != quote and expected[-1] != quote - assert actual[0] != quote and actual[-1] != quote -@pytest.mark.parametrize("language", ALL_LANGUAGES) -def test_io_function_display_no_multiline_exercise( - language: str, tmp_path: Path, pytestconfig +@pytest.mark.parametrize("lang", EXCEPTION_LANGUAGES) +def test_generic_exception_wrong( + lang: str, tmp_path: Path, pytestconfig: pytest.Config ): conf = configuration( - pytestconfig, - "echo-function", - language, - tmp_path, - "one-display-no-multiline.tson", - "correct", - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] - start_test = updates.find_all("start-test") - close_test = updates.find_all("close-test") - assert 1 == len(start_test) - assert 1 == len(close_test) - assert "return" == start_test[0].get("channel", "") - expected, actual = start_test[0].get("expected", ""), close_test[0].get( - "generated", "" - ) - quote = quotes[language] - assert expected[0] == quote and expected[-1] == quote - assert actual[0] == quote and actual[-1] == quote - - -@pytest.mark.parametrize("language", ALL_LANGUAGES) -def test_io_function_nested_call_exercise(language: str, tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, "echo-function", language, tmp_path, "one-nested.yaml", "correct" - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] - - -@pytest.mark.haskell -@pytest.mark.parametrize("language", ("haskell", "runhaskell")) -def test_io_function_exercise_haskell_io(language: str, tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, "echo-function", language, tmp_path, "one.tson", "correct_io" - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] - - -@pytest.mark.parametrize("language", ALL_SPECIFIC_LANGUAGES) -def test_specific_evaluation(language: str, tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, - "echo-function", - language, - tmp_path, - "two-specific.tson", - "correct", - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["wrong", "correct"] - assert len(updates.find_all("append-message")) == 2 - - -@pytest.mark.parametrize("language", ALL_LANGUAGES) -def test_programmed_evaluation(language: str, tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, - "echo-function", - language, - tmp_path, - "programmed.tson", - "correct", - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] - assert len(updates.find_all("append-message")) - - -@pytest.mark.parametrize( - "lang", - [ - "python", - "java", - "kotlin", - "csharp", - pytest.param("haskell", marks=pytest.mark.haskell), - pytest.param("runhaskell", marks=pytest.mark.haskell), - ], -) -def test_language_evaluator_exception_correct(lang: str, tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, "division", lang, tmp_path, "plan.json", "correct" + pytestconfig, "division", lang, tmp_path, "plan-generic-exception.json", "wrong" ) result = execute_config(conf) updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] + assert updates.find_status_enum() == ["wrong"] 
-@pytest.mark.parametrize( - "lang", - [ - "python", - "java", - "kotlin", - "csharp", - pytest.param("haskell", marks=pytest.mark.haskell), - pytest.param("runhaskell", marks=pytest.mark.haskell), - ], -) -def test_language_evaluator_generic_exception_correct( - lang: str, tmp_path: Path, pytestconfig +@pytest.mark.parametrize("lang", EXCEPTION_LANGUAGES) +def test_generic_exception_correct( + lang: str, tmp_path: Path, pytestconfig: pytest.Config ): conf = configuration( pytestconfig, @@ -341,38 +77,9 @@ def test_language_evaluator_generic_exception_correct( assert updates.find_status_enum() == ["correct"] -@pytest.mark.parametrize( - "lang", - [ - "python", - "java", - "kotlin", - "csharp", - pytest.param("haskell", marks=pytest.mark.haskell), - pytest.param("runhaskell", marks=pytest.mark.haskell), - ], -) -def test_language_evaluator_exception_wrong(lang: str, tmp_path: Path, pytestconfig): - conf = configuration(pytestconfig, "division", lang, tmp_path, "plan.json", "wrong") - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["wrong"] - assert len(updates.find_all("append-message")) == 1 - - -@pytest.mark.parametrize( - "lang", - [ - "python", - "java", - "kotlin", - "csharp", - pytest.param("haskell", marks=pytest.mark.haskell), - pytest.param("runhaskell", marks=pytest.mark.haskell), - ], -) -def test_language_evaluator_generic_exception_wrong_error( - lang: str, tmp_path: Path, pytestconfig +@pytest.mark.parametrize("lang", EXCEPTION_LANGUAGES) +def test_generic_exception_wrong_error( + lang: str, tmp_path: Path, pytestconfig: pytest.Config ): conf = configuration( pytestconfig, @@ -387,52 +94,10 @@ def test_language_evaluator_generic_exception_wrong_error( assert updates.find_status_enum() == ["wrong"] -@pytest.mark.parametrize( - "lang", - [ - "python", - "java", - "kotlin", - "csharp", - pytest.param("haskell", marks=pytest.mark.haskell), - pytest.param("runhaskell", marks=pytest.mark.haskell), - ], -) -def test_language_evaluator_exception_wrong_error( - lang: str, tmp_path: Path, pytestconfig -): - conf = configuration( - pytestconfig, "division", lang, tmp_path, "plan.json", "wrong-error" - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["wrong"] - - -@pytest.mark.parametrize( - "lang", - [ - "python", - "java", - "kotlin", - "csharp", - pytest.param("haskell", marks=pytest.mark.haskell), - pytest.param("runhaskell", marks=pytest.mark.haskell), - ], -) -def test_language_evaluator_generic_exception_wrong( - lang: str, tmp_path: Path, pytestconfig -): - conf = configuration( - pytestconfig, "division", lang, tmp_path, "plan-generic-exception.json", "wrong" - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["wrong"] - - @pytest.mark.parametrize("lang", ["python", "java", "kotlin", "csharp"]) -def test_assignment_and_use_in_expression(lang: str, tmp_path: Path, pytestconfig): +def test_assignment_and_use_in_expression( + lang: str, tmp_path: Path, pytestconfig: pytest.Config +): conf = configuration( pytestconfig, "isbn", lang, tmp_path, "one-with-assignment.tson", "solution" ) @@ -457,7 +122,9 @@ def test_assignment_and_use_in_expression(lang: str, tmp_path: Path, pytestconfi pytest.param("runhaskell", marks=pytest.mark.haskell), ], ) -def test_assignment_and_use_in_expression_list(lang: str, tmp_path: Path, pytestconfig): +def 
test_assignment_and_use_in_expression_list( + lang: str, tmp_path: Path, pytestconfig: pytest.Config +): conf = configuration( pytestconfig, "isbn-list", @@ -477,7 +144,9 @@ def test_assignment_and_use_in_expression_list(lang: str, tmp_path: Path, pytest @pytest.mark.parametrize("lang", ["python", "java", "kotlin", "csharp"]) -def test_crashing_assignment_with_before(lang: str, tmp_path: Path, pytestconfig): +def test_crashing_assignment_with_before( + lang: str, tmp_path: Path, pytestconfig: pytest.Config +): conf = configuration( pytestconfig, "isbn", @@ -504,7 +173,9 @@ def test_crashing_assignment_with_before(lang: str, tmp_path: Path, pytestconfig pytest.param("runhaskell", marks=pytest.mark.haskell), ], ) -def test_heterogeneous_arguments_are_detected(lang: str, tmp_path: Path, pytestconfig): +def test_heterogeneous_arguments_are_detected( + lang: str, tmp_path: Path, pytestconfig: pytest.Config +): conf = configuration(pytestconfig, "isbn", lang, tmp_path, "full.tson", "solution") result = execute_config(conf) updates = assert_valid_output(result, pytestconfig) @@ -512,7 +183,7 @@ def test_heterogeneous_arguments_are_detected(lang: str, tmp_path: Path, pytestc assert updates.find_status_enum() == ["internal error"] -def test_missing_key_types_detected(tmp_path: Path, pytestconfig): +def test_missing_key_types_detected(tmp_path: Path, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "objects", "python", tmp_path, "missing_key_types.yaml", "correct" ) @@ -522,7 +193,9 @@ def test_missing_key_types_detected(tmp_path: Path, pytestconfig): assert updates.find_status_enum() == ["internal error"] -def test_missing_key_types_detected_js_object(tmp_path: Path, pytestconfig): +def test_missing_key_types_detected_js_object( + tmp_path: Path, pytestconfig: pytest.Config +): conf = configuration( pytestconfig, "objects", @@ -541,7 +214,7 @@ def test_missing_key_types_detected_js_object(tmp_path: Path, pytestconfig): "suite", ["missing_key_types_js_dictionary", "missing_key_types"] ) def test_missing_key_types_detected_js_dictionary( - suite: str, tmp_path: Path, pytestconfig + suite: str, tmp_path: Path, pytestconfig: pytest.Config ): conf = configuration( pytestconfig, "objects", "javascript", tmp_path, f"{suite}.yaml", "correct" @@ -553,7 +226,9 @@ def test_missing_key_types_detected_js_dictionary( @pytest.mark.parametrize("lang", ["java"]) -def test_advanced_types_are_allowed(lang: str, tmp_path: Path, pytestconfig): +def test_advanced_types_are_allowed( + lang: str, tmp_path: Path, pytestconfig: pytest.Config +): conf = configuration( pytestconfig, "objects", @@ -568,31 +243,10 @@ def test_advanced_types_are_allowed(lang: str, tmp_path: Path, pytestconfig): assert updates.find_status_enum() == ["correct"] -@pytest.mark.parametrize("lang", ["python", "java", "kotlin", "javascript", "csharp"]) -def test_programmed_evaluator_lotto(lang: str, tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, "lotto", lang, tmp_path, "one-programmed-python.tson", "correct" - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert len(updates.find_all("start-testcase")) == 1 - assert updates.find_status_enum() == ["correct"] - - -@pytest.mark.parametrize("lang", ["python", "java", "kotlin", "javascript", "csharp"]) -def test_programmed_evaluator_wrong(lang: str, tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, "lotto", lang, tmp_path, "one-programmed-python.tson", "wrong" - ) - result = execute_config(conf) - 
updates = assert_valid_output(result, pytestconfig) - assert len(updates.find_all("start-testcase")) == 1 - assert updates.find_status_enum() == ["wrong"] - assert len(updates.find_all("append-message")) == 1 - - @pytest.mark.parametrize("language", ALL_LANGUAGES) -def test_batch_compilation(language: str, tmp_path: Path, pytestconfig, mocker): +def test_batch_compilation( + language: str, tmp_path: Path, pytestconfig: pytest.Config, mocker: MockerFixture +): config_ = {"options": {"mode": "batch"}} lang_class = LANGUAGES[language] spy = mocker.spy(lang_class, "compilation") @@ -608,7 +262,7 @@ def test_batch_compilation(language: str, tmp_path: Path, pytestconfig, mocker): @pytest.mark.parametrize("language", ALL_LANGUAGES) def test_batch_compilation_fallback( - language: str, tmp_path: Path, pytestconfig, mocker + language: str, tmp_path: Path, pytestconfig: pytest.Config, mocker: MockerFixture ): config_ = {"options": {"allow_fallback": True}} lang_class = LANGUAGES[language] @@ -625,7 +279,7 @@ def test_batch_compilation_fallback( @pytest.mark.parametrize("language", ALL_LANGUAGES) def test_batch_compilation_no_fallback( - language: str, tmp_path: Path, pytestconfig, mocker + language: str, tmp_path: Path, pytestconfig: pytest.Config, mocker: MockerFixture ): config_ = {"options": {"allow_fallback": False}} lang_class = LANGUAGES[language] @@ -642,7 +296,7 @@ def test_batch_compilation_no_fallback( @pytest.mark.parametrize("language", ALL_LANGUAGES) def test_batch_compilation_no_fallback_runtime( - language: str, tmp_path: Path, pytestconfig + language: str, tmp_path: Path, pytestconfig: pytest.Config ): config_ = {"options": {"allow_fallback": False}} conf = configuration( @@ -660,7 +314,7 @@ def test_batch_compilation_no_fallback_runtime( @pytest.mark.parametrize( "lang", ["python", "java", "c", "javascript", "kotlin", "bash", "csharp"] ) -def test_program_params(lang: str, tmp_path: Path, pytestconfig): +def test_program_params(lang: str, tmp_path: Path, pytestconfig: pytest.Config): conf = configuration(pytestconfig, "sum", lang, tmp_path, "short.tson", "correct") result = execute_config(conf) updates = assert_valid_output(result, pytestconfig) @@ -672,7 +326,7 @@ def test_program_params(lang: str, tmp_path: Path, pytestconfig): @pytest.mark.parametrize( "language", ["python", "java", "kotlin", "javascript", "csharp"] ) -def test_objects(language: str, tmp_path: Path, pytestconfig): +def test_objects(language: str, tmp_path: Path, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "objects", language, tmp_path, "plan.tson", "correct" ) @@ -685,7 +339,7 @@ def test_objects(language: str, tmp_path: Path, pytestconfig): @pytest.mark.parametrize( "language", ["python", "java", "kotlin", "javascript", "csharp"] ) -def test_objects_chained(language: str, tmp_path: Path, pytestconfig): +def test_objects_chained(language: str, tmp_path: Path, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "objects", language, tmp_path, "chained.tson", "correct" ) @@ -698,7 +352,9 @@ def test_objects_chained(language: str, tmp_path: Path, pytestconfig): @pytest.mark.parametrize( "language", ["python", "java", "kotlin", "javascript", "csharp"] ) -def test_property_assignment(language: str, tmp_path: Path, pytestconfig): +def test_property_assignment( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): conf = configuration( pytestconfig, "objects", @@ -716,7 +372,7 @@ def test_property_assignment(language: str, tmp_path: Path, pytestconfig): 
@pytest.mark.parametrize( "language", ["python", "java", "kotlin", "javascript", "csharp"] ) -def test_counter(language: str, tmp_path: Path, pytestconfig): +def test_counter(language: str, tmp_path: Path, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "counter", language, tmp_path, "plan.yaml", "solution" ) @@ -729,7 +385,7 @@ def test_counter(language: str, tmp_path: Path, pytestconfig): @pytest.mark.parametrize( "language", ["python", "java", "kotlin", "javascript", "csharp"] ) -def test_counter_chained(language: str, tmp_path: Path, pytestconfig): +def test_counter_chained(language: str, tmp_path: Path, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "counter", language, tmp_path, "chained.yaml", "solution" ) @@ -742,7 +398,7 @@ def test_counter_chained(language: str, tmp_path: Path, pytestconfig): @pytest.mark.parametrize( "language", ["python", "java", "kotlin", "javascript", "csharp"] ) -def test_objects_yaml(language: str, tmp_path: Path, pytestconfig): +def test_objects_yaml(language: str, tmp_path: Path, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "objects", language, tmp_path, "plan.yaml", "correct" ) @@ -760,7 +416,7 @@ def test_objects_yaml(language: str, tmp_path: Path, pytestconfig): pytest.param("runhaskell", marks=pytest.mark.haskell), ], ) -def test_objects_error(language: str, tmp_path: Path, pytestconfig): +def test_objects_error(language: str, tmp_path: Path, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "objects", language, tmp_path, "plan.tson", "correct" ) @@ -781,7 +437,9 @@ def test_objects_error(language: str, tmp_path: Path, pytestconfig): ("runhaskell", ["internal error"]), ], ) -def test_named_parameters(language: str, result: list, tmp_path: Path, pytestconfig): +def test_named_parameters( + language: str, result: list, tmp_path: Path, pytestconfig: pytest.Config +): conf = configuration( pytestconfig, "echo-function", language, tmp_path, "one-named.tson", "correct" ) @@ -790,80 +448,6 @@ def test_named_parameters(language: str, result: list, tmp_path: Path, pytestcon assert updates.find_status_enum() == result -def test_javascript_exception_correct(tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, - "js-exceptions", - "javascript", - tmp_path, - "plan.yaml", - "correct", - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] - assert len(updates.find_all("append-message")) == 0 - - -def test_javascript_exception_correct_temp(tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, - "js-exceptions", - "javascript", - tmp_path, - "plan.yaml", - "correct-temp", - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] - assert len(updates.find_all("append-message")) == 0 - - -def test_javascript_exception_wrong(tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, - "js-exceptions", - "javascript", - tmp_path, - "plan.yaml", - "wrong", - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["wrong"] - assert len(updates.find_all("append-message")) == 1 - - -def test_javascript_exception_wrong_null(tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, - "js-exceptions", - "javascript", - tmp_path, - "plan.yaml", - "wrong-null", - ) - result = execute_config(conf) - updates = 
assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["wrong"] - assert len(updates.find_all("append-message")) == 0 - - -def test_javascript_exception_missing_message(tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, - "js-exceptions", - "javascript", - tmp_path, - "plan.yaml", - "wrong-message", - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["wrong"] - - def test_timeouts_propagate_to_contexts(): execution_result = ExecutionResult( stdout="--PaqJwrEn0-- SEP--pBoq4YdEP-- SEP", @@ -888,28 +472,6 @@ def test_timeouts_propagate_to_contexts(): assert context_result.exceptions == execution_result.testcase_separator -def test_function_arguments_without_brackets(tmp_path: Path, pytestconfig): - conf = configuration(pytestconfig, "", "haskell", tmp_path) - plan = Suite() - bundle = create_bundle(conf, sys.stdout, plan) - - statement = FunctionCall( - type=FunctionType.FUNCTION, - name="test", - namespace=None, - arguments=[ - NumberType(type=BasicNumericTypes.REAL, data=5.5), - StringType(type=BasicStringTypes.TEXT, data="hallo"), - BooleanType(type=BasicBooleanTypes.BOOLEAN, data=True), - ], - ) - - result = generate_statement(bundle, statement) - assert ( - result == f'{submission_name(bundle.language)}.test 5.5 :: Double "hallo" True' - ) - - @pytest.mark.parametrize( "language_and_expected", [ @@ -920,7 +482,9 @@ def test_function_arguments_without_brackets(tmp_path: Path, pytestconfig): ("python", "() Coord(x=5, y=6)"), ], ) -def test_unknown_return_type(tmp_path: Path, pytestconfig, language_and_expected): +def test_unknown_return_type( + tmp_path: Path, pytestconfig: pytest.Config, language_and_expected: tuple[str, str] +): language, expected = language_and_expected conf = configuration( pytestconfig, @@ -938,7 +502,9 @@ def test_unknown_return_type(tmp_path: Path, pytestconfig, language_and_expected @pytest.mark.parametrize("language", ALL_SPECIFIC_LANGUAGES) -def test_expected_no_return_but_got_some(language: str, tmp_path: Path, pytestconfig): +def test_expected_no_return_but_got_some( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): conf = configuration( pytestconfig, "echo-function", @@ -954,7 +520,9 @@ def test_expected_no_return_but_got_some(language: str, tmp_path: Path, pytestco @pytest.mark.parametrize("language", ALL_SPECIFIC_LANGUAGES) -def test_expected_no_return_and_got_none(language: str, tmp_path: Path, pytestconfig): +def test_expected_no_return_and_got_none( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): conf = configuration( pytestconfig, "echo-function", @@ -970,7 +538,9 @@ def test_expected_no_return_and_got_none(language: str, tmp_path: Path, pytestco @pytest.mark.parametrize("language", ALL_SPECIFIC_LANGUAGES) -def test_expected_return_but_got_none(language: str, tmp_path: Path, pytestconfig): +def test_expected_return_but_got_none( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): conf = configuration( pytestconfig, "echo-function", @@ -986,7 +556,9 @@ def test_expected_return_but_got_none(language: str, tmp_path: Path, pytestconfi @pytest.mark.parametrize("language", ALL_SPECIFIC_LANGUAGES) -def test_expected_return_and_got_some(language: str, tmp_path: Path, pytestconfig): +def test_expected_return_and_got_some( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): conf = configuration( pytestconfig, "echo-function", @@ -1002,7 +574,9 @@ def 
test_expected_return_and_got_some(language: str, tmp_path: Path, pytestconfi @pytest.mark.parametrize("language", ALL_SPECIFIC_LANGUAGES) -def test_ignored_return_and_got_some(language: str, tmp_path: Path, pytestconfig): +def test_ignored_return_and_got_some( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): conf = configuration( pytestconfig, "echo-function", @@ -1018,7 +592,9 @@ def test_ignored_return_and_got_some(language: str, tmp_path: Path, pytestconfig @pytest.mark.parametrize("language", ALL_SPECIFIC_LANGUAGES) -def test_language_literals_work(language: str, tmp_path: Path, pytestconfig): +def test_language_literals_work( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): conf = configuration( pytestconfig, "echo-function", @@ -1033,25 +609,10 @@ def test_language_literals_work(language: str, tmp_path: Path, pytestconfig): assert updates.find_status_enum() == ["correct"] -def test_python_input_prompt_is_ignored(tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, - "echo", - "python", - tmp_path, - "one.tson", - "input-prompt", - ) - - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] - - # Check that the test suite is valid with a correct submission. # This test suite is used for the test below "test_output_in_script_is_caught". @pytest.mark.parametrize("language", ["python", "javascript", "bash"]) -def test_two_suite_is_valid(language: str, tmp_path: Path, pytestconfig): +def test_two_suite_is_valid(language: str, tmp_path: Path, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "echo-function", @@ -1066,7 +627,9 @@ def test_two_suite_is_valid(language: str, tmp_path: Path, pytestconfig): @pytest.mark.parametrize("language", ["python", "javascript", "bash"]) -def test_output_in_script_is_caught(language: str, tmp_path: Path, pytestconfig): +def test_output_in_script_is_caught( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): conf = configuration( pytestconfig, "echo-function", @@ -1081,7 +644,7 @@ def test_output_in_script_is_caught(language: str, tmp_path: Path, pytestconfig) assert updates.find_status_enum() == ["wrong", "correct", "correct"] -def test_main_call_quotes(tmp_path: Path, pytestconfig): +def test_main_call_quotes(tmp_path: Path, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "echo-function", @@ -1102,7 +665,7 @@ def test_main_call_quotes(tmp_path: Path, pytestconfig): ) -def test_stdin_and_arguments_use_heredoc(tmp_path: Path, pytestconfig): +def test_stdin_and_arguments_use_heredoc(tmp_path: Path, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "echo-function", @@ -1126,7 +689,7 @@ def test_stdin_and_arguments_use_heredoc(tmp_path: Path, pytestconfig): ) -def test_stdin_token_is_unique(tmp_path: Path, pytestconfig): +def test_stdin_token_is_unique(tmp_path: Path, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "echo-function", @@ -1145,17 +708,3 @@ def test_stdin_token_is_unique(tmp_path: Path, pytestconfig): assert ( actual.description == "$ submission hello << 'STDINN'\nOne line\nSTDIN\nSTDINN" ) - - -def test_javascript_vanilla_object(tmp_path: Path, pytestconfig): - conf = configuration( - pytestconfig, - "echo-function", - "javascript", - tmp_path, - "javascript-object.yaml", - "javascript-object", - ) - result = execute_config(conf) - updates = assert_valid_output(result, pytestconfig) - assert updates.find_status_enum() == ["correct"] diff 
--git a/tests/test_integration_javascript.py b/tests/test_integration_javascript.py index f03868e0..ff30d907 100644 --- a/tests/test_integration_javascript.py +++ b/tests/test_integration_javascript.py @@ -8,6 +8,7 @@ from pathlib import Path import pytest +from syrupy import SnapshotAssertion # type: ignore[reportPrivateImportUsage] from tests.manual_utils import ( assert_valid_output, @@ -80,7 +81,12 @@ def get_exercises() -> list[Path]: @pytest.mark.parametrize("exercise", ALL_EXERCISES, ids=lambda ex: ex.name) -def test_javascript_exercise(exercise: Path, tmp_path: Path, pytestconfig, snapshot): +def test_javascript_exercise( + exercise: Path, + tmp_path: Path, + pytestconfig: pytest.Config, + snapshot: SnapshotAssertion, +): conf = exercise_configuration( pytestconfig, exercise, diff --git a/tests/test_io_exercises.py b/tests/test_io_exercises.py new file mode 100644 index 00000000..fc1e971c --- /dev/null +++ b/tests/test_io_exercises.py @@ -0,0 +1,170 @@ +""" +Tests specifically for IO exercises. +""" + +import shutil +from pathlib import Path + +import pytest + +from tested.languages.language import STRING_QUOTES +from tested.testsuite import SupportedLanguage +from tests.language_markers import ALL_LANGUAGES +from tests.manual_utils import assert_valid_output, configuration, execute_config + + +@pytest.mark.parametrize("language", ALL_LANGUAGES) +def test_io_exercise(language: str, tmp_path: Path, pytestconfig: pytest.Config): + conf = configuration( + pytestconfig, "echo", language, tmp_path, "one.tson", "correct" + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] + + +@pytest.mark.parametrize("language", ALL_LANGUAGES) +def test_io_exercise_wrong(language: str, tmp_path: Path, pytestconfig: pytest.Config): + conf = configuration(pytestconfig, "echo", language, tmp_path, "one.tson", "wrong") + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["wrong"] + + +@pytest.mark.parametrize("language", ALL_LANGUAGES) +def test_io_function_exercise( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, "echo-function", language, tmp_path, "one.tson", "correct" + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] + + +@pytest.mark.parametrize("language", ALL_LANGUAGES) +def test_io_function_file_exercise( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, "echo-function-file", language, tmp_path, "one.tson", "correct" + ) + shutil.copytree( + Path(conf.resources).parent / "workdir", tmp_path, dirs_exist_ok=True + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] + + +@pytest.mark.parametrize("language", ALL_LANGUAGES) +def test_io_function_additional_source_files( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, + "echo-function-additional-source-files", + language, + tmp_path, + "one.tson", + "correct", + ) + shutil.copytree( + Path(conf.resources).parent / "workdir", tmp_path, dirs_exist_ok=True + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] + + +@pytest.mark.parametrize("language", ALL_LANGUAGES) +def 
test_io_function_escape_exercise( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, "echo-function", language, tmp_path, "one-escape.tson", "correct" + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] + + +@pytest.mark.parametrize("language", ALL_LANGUAGES) +def test_io_function_display_multiline_exercise( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, + "echo-function", + language, + tmp_path, + "one-display-multiline.tson", + "correct", + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] + start_test = updates.find_all("start-test") + close_test = updates.find_all("close-test") + assert 1 == len(start_test) + assert 1 == len(close_test) + assert "return" == start_test[0].get("channel", "") + expected, actual = start_test[0].get("expected", ""), close_test[0].get( + "generated", "" + ) + quote = STRING_QUOTES[SupportedLanguage(language)] + assert expected[0] != quote and expected[-1] != quote + assert actual[0] != quote and actual[-1] != quote + + +@pytest.mark.parametrize("language", ALL_LANGUAGES) +def test_io_function_display_no_multiline_exercise( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, + "echo-function", + language, + tmp_path, + "one-display-no-multiline.tson", + "correct", + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] + start_test = updates.find_all("start-test") + close_test = updates.find_all("close-test") + assert 1 == len(start_test) + assert 1 == len(close_test) + assert "return" == start_test[0].get("channel", "") + expected, actual = start_test[0].get("expected", ""), close_test[0].get( + "generated", "" + ) + quote = STRING_QUOTES[SupportedLanguage(language)] + assert expected[0] == quote and expected[-1] == quote + assert actual[0] == quote and actual[-1] == quote + + +@pytest.mark.parametrize("language", ALL_LANGUAGES) +def test_io_function_nested_call_exercise( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, "echo-function", language, tmp_path, "one-nested.yaml", "correct" + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] + + +@pytest.mark.parametrize("language", ("haskell", "runhaskell")) +def test_io_function_exercise_haskell_io( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, "echo-function", language, tmp_path, "one.tson", "correct_io" + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] diff --git a/tests/test_language_quircks.py b/tests/test_language_quircks.py new file mode 100644 index 00000000..dbb0c9bc --- /dev/null +++ b/tests/test_language_quircks.py @@ -0,0 +1,165 @@ +""" +Tests for specific aspects of certain language implementations. 
+""" + +import shutil +import sys +from pathlib import Path + +import pytest + +from tested.configs import create_bundle +from tested.datatypes import BasicBooleanTypes, BasicNumericTypes, BasicStringTypes +from tested.languages.conventionalize import submission_name +from tested.languages.generation import generate_statement +from tested.serialisation import ( + BooleanType, + FunctionCall, + FunctionType, + NumberType, + StringType, +) +from tested.testsuite import Suite +from tests.manual_utils import assert_valid_output, configuration, execute_config + + +def test_javascript_vanilla_object(tmp_path: Path, pytestconfig: pytest.Config): + conf = configuration( + pytestconfig, + "echo-function", + "javascript", + tmp_path, + "javascript-object.yaml", + "javascript-object", + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] + + +def test_python_input_prompt_is_ignored(tmp_path: Path, pytestconfig: pytest.Config): + conf = configuration( + pytestconfig, + "echo", + "python", + tmp_path, + "one.tson", + "input-prompt", + ) + + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] + + +def test_haskell_function_arguments_without_brackets( + tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration(pytestconfig, "", "haskell", tmp_path) + plan = Suite() + bundle = create_bundle(conf, sys.stdout, plan) + + statement = FunctionCall( + type=FunctionType.FUNCTION, + name="test", + namespace=None, + arguments=[ + NumberType(type=BasicNumericTypes.REAL, data=5.5), + StringType(type=BasicStringTypes.TEXT, data="hallo"), + BooleanType(type=BasicBooleanTypes.BOOLEAN, data=True), + ], + ) + + result = generate_statement(bundle, statement) + assert ( + result == f'{submission_name(bundle.language)}.test 5.5 :: Double "hallo" True' + ) + + +def test_javascript_exception_correct(tmp_path: Path, pytestconfig: pytest.Config): + conf = configuration( + pytestconfig, + "js-exceptions", + "javascript", + tmp_path, + "plan.yaml", + "correct", + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] + assert len(updates.find_all("append-message")) == 0 + + +def test_javascript_exception_correct_temp(tmp_path: Path, pytestconfig: pytest.Config): + conf = configuration( + pytestconfig, + "js-exceptions", + "javascript", + tmp_path, + "plan.yaml", + "correct-temp", + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] + assert len(updates.find_all("append-message")) == 0 + + +def test_javascript_exception_wrong(tmp_path: Path, pytestconfig: pytest.Config): + conf = configuration( + pytestconfig, + "js-exceptions", + "javascript", + tmp_path, + "plan.yaml", + "wrong", + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["wrong"] + assert len(updates.find_all("append-message")) == 1 + + +def test_javascript_exception_wrong_null(tmp_path: Path, pytestconfig: pytest.Config): + conf = configuration( + pytestconfig, + "js-exceptions", + "javascript", + tmp_path, + "plan.yaml", + "wrong-null", + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["wrong"] + assert len(updates.find_all("append-message")) == 0 + + +def 
test_javascript_exception_missing_message( + tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, + "js-exceptions", + "javascript", + tmp_path, + "plan.yaml", + "wrong-message", + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["wrong"] + + +@pytest.mark.parametrize("exercise", ["echo-function-file", "echo-function"]) +def test_javascript_async(exercise: str, tmp_path: Path, pytestconfig: pytest.Config): + conf = configuration( + pytestconfig, exercise, "javascript", tmp_path, "one.tson", "correct-async" + ) + workdir = Path(conf.resources).parent / "workdir" + if workdir.exists(): + shutil.copytree(workdir, tmp_path, dirs_exist_ok=True) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] diff --git a/tests/test_linters.py b/tests/test_linters.py index bd4f7db1..00db95ec 100644 --- a/tests/test_linters.py +++ b/tests/test_linters.py @@ -5,16 +5,15 @@ from tests.manual_utils import assert_valid_output, configuration, execute_config -def _get_config_options(language: str): +def _get_config_options(language: str) -> list[dict]: return [ {"options": {"language": {language: {"linter": True}}}}, {"options": {"linter": True}}, ] -@pytest.mark.linter @pytest.mark.parametrize("config", _get_config_options("c")) -def test_cppcheck(tmp_path: Path, config, pytestconfig): +def test_cppcheck(tmp_path: Path, config: dict, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "echo-function", @@ -29,9 +28,8 @@ def test_cppcheck(tmp_path: Path, config, pytestconfig): assert len(updates.find_all("annotate-code")) > 0 -@pytest.mark.linter @pytest.mark.parametrize("config", _get_config_options("java")) -def test_checkstyle(tmp_path: Path, config, pytestconfig): +def test_checkstyle(tmp_path: Path, config: dict, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "counter", @@ -46,9 +44,8 @@ def test_checkstyle(tmp_path: Path, config, pytestconfig): assert len(updates.find_all("annotate-code")) > 0 -@pytest.mark.linter @pytest.mark.parametrize("config", _get_config_options("javascript")) -def test_eslint(tmp_path: Path, config, pytestconfig): +def test_eslint(tmp_path: Path, config: dict, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "counter", @@ -63,7 +60,6 @@ def test_eslint(tmp_path: Path, config, pytestconfig): assert len(updates.find_all("annotate-code")) > 0 -@pytest.mark.linter @pytest.mark.parametrize( ("language", "config"), [ @@ -73,7 +69,9 @@ def test_eslint(tmp_path: Path, config, pytestconfig): ("runhaskell", _get_config_options("runhaskell")[1]), ], ) -def test_hlint(language: str, config, tmp_path: Path, pytestconfig): +def test_hlint( + language: str, config: dict, tmp_path: Path, pytestconfig: pytest.Config +): conf = configuration( pytestconfig, "echo-function", @@ -88,9 +86,8 @@ def test_hlint(language: str, config, tmp_path: Path, pytestconfig): assert len(updates.find_all("annotate-code")) > 0 -@pytest.mark.linter @pytest.mark.parametrize("config", _get_config_options("kotlin")) -def test_ktlint(tmp_path: Path, config, pytestconfig): +def test_ktlint(tmp_path: Path, config: dict, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "counter", "kotlin", tmp_path, "plan.yaml", "solution", config ) @@ -99,9 +96,8 @@ def test_ktlint(tmp_path: Path, config, pytestconfig): assert len(updates.find_all("annotate-code")) > 0 
-@pytest.mark.linter @pytest.mark.parametrize("config", _get_config_options("python")) -def test_pylint(tmp_path: Path, config, pytestconfig): +def test_pylint(tmp_path: Path, config: dict, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "counter", @@ -116,9 +112,8 @@ def test_pylint(tmp_path: Path, config, pytestconfig): assert len(updates.find_all("annotate-code")) > 0 -@pytest.mark.linter @pytest.mark.parametrize("config", _get_config_options("bash")) -def test_shellcheck(tmp_path: Path, config, pytestconfig): +def test_shellcheck_wrong(tmp_path: Path, config: dict, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "echo", "bash", tmp_path, "one.tson", "wrong", config ) @@ -127,9 +122,8 @@ def test_shellcheck(tmp_path: Path, config, pytestconfig): assert len(updates.find_all("annotate-code")) > 0 -@pytest.mark.linter @pytest.mark.parametrize("config", _get_config_options("bash")) -def test_shellcheck(tmp_path: Path, config, pytestconfig): +def test_shellcheck_warning(tmp_path: Path, config: dict, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "echo", "bash", tmp_path, "one.tson", "warning", config ) diff --git a/tests/test_oracles.py b/tests/test_oracles_builtin.py similarity index 88% rename from tests/test_oracles.py rename to tests/test_oracles_builtin.py index d109f310..e071a7a3 100644 --- a/tests/test_oracles.py +++ b/tests/test_oracles_builtin.py @@ -2,6 +2,9 @@ from pathlib import Path from unittest.mock import ANY +import pytest +from pytest_mock import MockerFixture + import tested from tested.configs import create_bundle from tested.datatypes import BasicObjectTypes, BasicSequenceTypes, BasicStringTypes @@ -40,7 +43,7 @@ def oracle_config( return OracleConfig(bundle=bundle, options=options, context_dir=tmp_path) -def test_text_oracle(tmp_path: Path, pytestconfig): +def test_text_oracle(tmp_path: Path, pytestconfig: pytest.Config): config = oracle_config(tmp_path, pytestconfig, {"ignoreWhitespace": False}) channel = TextOutputChannel(data="expected") result = evaluate_text(config, channel, "expected") @@ -54,7 +57,7 @@ def test_text_oracle(tmp_path: Path, pytestconfig): assert result.readable_actual == "nothing" -def test_text_oracle_whitespace(tmp_path: Path, pytestconfig): +def test_text_oracle_whitespace(tmp_path: Path, pytestconfig: pytest.Config): config = oracle_config(tmp_path, pytestconfig, {"ignoreWhitespace": True}) channel = TextOutputChannel(data="expected") result = evaluate_text(config, channel, "expected ") @@ -68,7 +71,7 @@ def test_text_oracle_whitespace(tmp_path: Path, pytestconfig): assert result.readable_actual == "nothing" -def test_text_oracle_case_sensitive(tmp_path: Path, pytestconfig): +def test_text_oracle_case_sensitive(tmp_path: Path, pytestconfig: pytest.Config): config = oracle_config(tmp_path, pytestconfig, {"caseInsensitive": True}) channel = TextOutputChannel(data="expected") result = evaluate_text(config, channel, "Expected") @@ -82,7 +85,7 @@ def test_text_oracle_case_sensitive(tmp_path: Path, pytestconfig): assert result.readable_actual == "nothing" -def test_text_oracle_combination(tmp_path: Path, pytestconfig): +def test_text_oracle_combination(tmp_path: Path, pytestconfig: pytest.Config): config = oracle_config( tmp_path, pytestconfig, {"caseInsensitive": True, "ignoreWhitespace": True} ) @@ -98,7 +101,7 @@ def test_text_oracle_combination(tmp_path: Path, pytestconfig): assert result.readable_actual == "nothing" -def test_text_oracle_rounding(tmp_path: Path, pytestconfig): +def 
test_text_oracle_rounding(tmp_path: Path, pytestconfig: pytest.Config): config = oracle_config( tmp_path, pytestconfig, {"tryFloatingPoint": True, "applyRounding": True} ) @@ -114,7 +117,7 @@ def test_text_oracle_rounding(tmp_path: Path, pytestconfig): assert result.readable_actual == "1.5" -def test_text_oracle_round_to(tmp_path: Path, pytestconfig): +def test_text_oracle_round_to(tmp_path: Path, pytestconfig: pytest.Config): config = oracle_config( tmp_path, pytestconfig, @@ -132,9 +135,11 @@ def test_text_oracle_round_to(tmp_path: Path, pytestconfig): assert result.readable_actual == "1.5" -def test_file_oracle_full_wrong(tmp_path: Path, pytestconfig, mocker): +def test_file_oracle_full_wrong( + tmp_path: Path, pytestconfig: pytest.Config, mocker: MockerFixture +): config = oracle_config(tmp_path, pytestconfig, {"mode": "full"}) - s = mocker.spy(tested.oracles.text, name="compare_text") + s = mocker.spy(tested.oracles.text, name="compare_text") # type: ignore[reportAttributeAccessIssue] mock_files = [ mocker.mock_open(read_data=content).return_value for content in ["expected\nexpected", "actual\nactual"] @@ -152,9 +157,11 @@ def test_file_oracle_full_wrong(tmp_path: Path, pytestconfig, mocker): assert result.readable_actual == "actual\nactual" -def test_file_oracle_full_correct(tmp_path: Path, pytestconfig, mocker): +def test_file_oracle_full_correct( + tmp_path: Path, pytestconfig: pytest.Config, mocker: MockerFixture +): config = oracle_config(tmp_path, pytestconfig, {"mode": "full"}) - s = mocker.spy(tested.oracles.text, name="compare_text") + s = mocker.spy(tested.oracles.text, name="compare_text") # type: ignore[reportAttributeAccessIssue] mock_files = [ mocker.mock_open(read_data=content).return_value for content in ["expected\nexpected", "expected\nexpected"] @@ -172,11 +179,13 @@ def test_file_oracle_full_correct(tmp_path: Path, pytestconfig, mocker): assert result.readable_actual == "expected\nexpected" -def test_file_oracle_line_wrong(tmp_path: Path, pytestconfig, mocker): +def test_file_oracle_line_wrong( + tmp_path: Path, pytestconfig: pytest.Config, mocker: MockerFixture +): config = oracle_config( tmp_path, pytestconfig, {"mode": "line", "stripNewlines": True} ) - s = mocker.spy(tested.oracles.text, name="compare_text") + s = mocker.spy(tested.oracles.text, name="compare_text") # type: ignore[reportAttributeAccessIssue] mock_files = [ mocker.mock_open(read_data=content).return_value for content in ["expected\nexpected2", "actual\nactual2"] @@ -196,11 +205,13 @@ def test_file_oracle_line_wrong(tmp_path: Path, pytestconfig, mocker): assert result.readable_actual == "actual\nactual2" -def test_file_oracle_line_correct(tmp_path: Path, pytestconfig, mocker): +def test_file_oracle_line_correct( + tmp_path: Path, pytestconfig: pytest.Config, mocker: MockerFixture +): config = oracle_config( tmp_path, pytestconfig, {"mode": "line", "stripNewlines": True} ) - s = mocker.spy(tested.oracles.text, name="compare_text") + s = mocker.spy(tested.oracles.text, name="compare_text") # type: ignore[reportAttributeAccessIssue] mock_files = [ mocker.mock_open(read_data=content).return_value for content in ["expected\nexpected2", "expected\nexpected2"] @@ -220,11 +231,13 @@ def test_file_oracle_line_correct(tmp_path: Path, pytestconfig, mocker): assert result.readable_actual == "expected\nexpected2" -def test_file_oracle_strip_lines_correct(tmp_path: Path, pytestconfig, mocker): +def test_file_oracle_strip_lines_correct( + tmp_path: Path, pytestconfig: pytest.Config, mocker: MockerFixture +): 
config = oracle_config( tmp_path, pytestconfig, {"mode": "line", "stripNewlines": True} ) - s = mocker.spy(tested.oracles.text, name="compare_text") + s = mocker.spy(tested.oracles.text, name="compare_text") # type: ignore[reportAttributeAccessIssue] mock_files = [ mocker.mock_open(read_data=content).return_value for content in ["expected\nexpected2\n", "expected\nexpected2"] @@ -244,11 +257,13 @@ def test_file_oracle_strip_lines_correct(tmp_path: Path, pytestconfig, mocker): assert result.readable_actual == "expected\nexpected2" -def test_file_oracle_dont_strip_lines_correct(tmp_path: Path, pytestconfig, mocker): +def test_file_oracle_dont_strip_lines_correct( + tmp_path: Path, pytestconfig: pytest.Config, mocker: MockerFixture +): config = oracle_config( tmp_path, pytestconfig, {"mode": "line", "stripNewlines": False} ) - s = mocker.spy(tested.oracles.text, name="compare_text") + s = mocker.spy(tested.oracles.text, name="compare_text") # type: ignore[reportAttributeAccessIssue] mock_files = [ mocker.mock_open(read_data=content).return_value for content in ["expected\nexpected2\n", "expected\nexpected2\n"] @@ -268,7 +283,9 @@ def test_file_oracle_dont_strip_lines_correct(tmp_path: Path, pytestconfig, mock assert result.readable_actual == "expected\nexpected2\n" -def test_exception_oracle_only_messages_correct(tmp_path: Path, pytestconfig): +def test_exception_oracle_only_messages_correct( + tmp_path: Path, pytestconfig: pytest.Config +): config = oracle_config(tmp_path, pytestconfig) channel = ExceptionOutputChannel(exception=ExpectedException(message="Test error")) actual_value = get_converter().dumps( @@ -280,7 +297,9 @@ def test_exception_oracle_only_messages_correct(tmp_path: Path, pytestconfig): assert result.readable_actual == "ZeroDivisionError: Test error" -def test_exception_oracle_only_messages_wrong(tmp_path: Path, pytestconfig): +def test_exception_oracle_only_messages_wrong( + tmp_path: Path, pytestconfig: pytest.Config +): config = oracle_config(tmp_path, pytestconfig) channel = ExceptionOutputChannel(exception=ExpectedException(message="Test error")) actual_value = get_converter().dumps( @@ -292,7 +311,9 @@ def test_exception_oracle_only_messages_wrong(tmp_path: Path, pytestconfig): assert result.readable_actual == "Pief poef" -def test_exception_oracle_correct_message_wrong_type(tmp_path: Path, pytestconfig): +def test_exception_oracle_correct_message_wrong_type( + tmp_path: Path, pytestconfig: pytest.Config +): channel = ExceptionOutputChannel( exception=ExpectedException( message="Test error", @@ -318,7 +339,9 @@ def test_exception_oracle_correct_message_wrong_type(tmp_path: Path, pytestconfi assert result.readable_actual == "ZeroDivisionError: Test error" -def test_exception_oracle_wrong_message_correct_type(tmp_path: Path, pytestconfig): +def test_exception_oracle_wrong_message_correct_type( + tmp_path: Path, pytestconfig: pytest.Config +): channel = ExceptionOutputChannel( exception=ExpectedException( message="Test error", @@ -347,7 +370,9 @@ def test_exception_oracle_wrong_message_correct_type(tmp_path: Path, pytestconfi assert result.readable_actual == "PafError: Test errors" -def test_exception_oracle_correct_type_and_message(tmp_path: Path, pytestconfig): +def test_exception_oracle_correct_type_and_message( + tmp_path: Path, pytestconfig: pytest.Config +): channel = ExceptionOutputChannel( exception=ExpectedException( message="Test error", @@ -376,7 +401,7 @@ def test_exception_oracle_correct_type_and_message(tmp_path: Path, pytestconfig) assert 
result.readable_actual == "PafError: Test error" -def test_value_string_as_text_is_detected(tmp_path: Path, pytestconfig): +def test_value_string_as_text_is_detected(tmp_path: Path, pytestconfig: pytest.Config): channel = ValueOutputChannel( value=StringType(type=BasicStringTypes.TEXT, data="multi\nline\nstring") ) @@ -390,7 +415,9 @@ def test_value_string_as_text_is_detected(tmp_path: Path, pytestconfig): assert result.readable_actual == "multi\nline\nstring" -def test_value_string_as_text_is_not_detected_if_disabled(tmp_path: Path, pytestconfig): +def test_value_string_as_text_is_not_detected_if_disabled( + tmp_path: Path, pytestconfig: pytest.Config +): channel = ValueOutputChannel( value=StringType(type=BasicStringTypes.TEXT, data="multi\nline\nstring") ) @@ -407,7 +434,7 @@ def test_value_string_as_text_is_not_detected_if_disabled(tmp_path: Path, pytest def test_value_string_as_text_is_not_detected_if_not_multiline( - tmp_path: Path, pytestconfig + tmp_path: Path, pytestconfig: pytest.Config ): channel = ValueOutputChannel( value=StringType(type=BasicStringTypes.TEXT, data="multi") @@ -424,7 +451,9 @@ def test_value_string_as_text_is_not_detected_if_not_multiline( assert result.readable_actual == "'multi\\nline\\nstring'" -def test_value_string_as_text_is_detected_when_no_actual(tmp_path: Path, pytestconfig): +def test_value_string_as_text_is_detected_when_no_actual( + tmp_path: Path, pytestconfig: pytest.Config +): channel = ValueOutputChannel( value=StringType(type=BasicStringTypes.TEXT, data="multi\nline\nstring") ) @@ -435,7 +464,9 @@ def test_value_string_as_text_is_detected_when_no_actual(tmp_path: Path, pytestc assert result.readable_actual == "" -def test_nested_sets_type_check_works_if_correct(tmp_path: Path, pytestconfig): +def test_nested_sets_type_check_works_if_correct( + tmp_path: Path, pytestconfig: pytest.Config +): expected_value = SequenceType( type=BasicSequenceTypes.SET, data=[ @@ -482,7 +513,9 @@ def test_nested_sets_type_check_works_if_correct(tmp_path: Path, pytestconfig): assert result.result.enum == Status.CORRECT -def test_too_many_sequence_values_dont_crash(tmp_path: Path, pytestconfig): +def test_too_many_sequence_values_dont_crash( + tmp_path: Path, pytestconfig: pytest.Config +): expected_value = SequenceType( type=BasicSequenceTypes.SEQUENCE, data=[ @@ -521,7 +554,7 @@ def test_too_many_sequence_values_dont_crash(tmp_path: Path, pytestconfig): assert result.result.enum == Status.WRONG -def test_too_many_object_values_dont_crash(tmp_path: Path, pytestconfig): +def test_too_many_object_values_dont_crash(tmp_path: Path, pytestconfig: pytest.Config): expected_value = ObjectType( type=BasicObjectTypes.MAP, data=[ @@ -555,7 +588,7 @@ def test_too_many_object_values_dont_crash(tmp_path: Path, pytestconfig): def test_values_different_lengths_are_detected_empty_actual( - tmp_path: Path, pytestconfig + tmp_path: Path, pytestconfig: pytest.Config ): channel = ValueOutputChannel( value=SequenceType( @@ -575,7 +608,7 @@ def test_values_different_lengths_are_detected_empty_actual( def test_values_different_lengths_are_detected_empty_expected( - tmp_path: Path, pytestconfig + tmp_path: Path, pytestconfig: pytest.Config ): channel = ValueOutputChannel( value=SequenceType(type=BasicSequenceTypes.SEQUENCE, data=[]) @@ -594,7 +627,9 @@ def test_values_different_lengths_are_detected_empty_expected( assert result.result.enum == Status.WRONG -def test_values_different_lengths_are_detected_different(tmp_path: Path, pytestconfig): +def 
test_values_different_lengths_are_detected_different( + tmp_path: Path, pytestconfig: pytest.Config +): channel = ValueOutputChannel( value=SequenceType( type=BasicSequenceTypes.SEQUENCE, @@ -615,7 +650,9 @@ def test_values_different_lengths_are_detected_different(tmp_path: Path, pytestc assert result.result.enum == Status.WRONG -def test_values_same_lengths_are_detected_different(tmp_path: Path, pytestconfig): +def test_values_same_lengths_are_detected_different( + tmp_path: Path, pytestconfig: pytest.Config +): channel = ValueOutputChannel( value=SequenceType( type=BasicSequenceTypes.SEQUENCE, @@ -639,7 +676,7 @@ def test_values_same_lengths_are_detected_different(tmp_path: Path, pytestconfig assert result.result.enum == Status.WRONG -def test_values_identical_list_is_detected(tmp_path: Path, pytestconfig): +def test_values_identical_list_is_detected(tmp_path: Path, pytestconfig: pytest.Config): channel = ValueOutputChannel( value=SequenceType( type=BasicSequenceTypes.SEQUENCE, @@ -663,7 +700,7 @@ def test_values_identical_list_is_detected(tmp_path: Path, pytestconfig): assert result.result.enum == Status.CORRECT -def test_list_and_map_works(tmp_path: Path, pytestconfig): +def test_list_and_map_works(tmp_path: Path, pytestconfig: pytest.Config): channel = ValueOutputChannel( value=SequenceType( type=BasicSequenceTypes.SEQUENCE, @@ -693,7 +730,7 @@ def test_list_and_map_works(tmp_path: Path, pytestconfig): assert result.result.enum == Status.WRONG -def test_map_and_list_works(tmp_path: Path, pytestconfig): +def test_map_and_list_works(tmp_path: Path, pytestconfig: pytest.Config): channel = ValueOutputChannel( value=ObjectType( type=BasicObjectTypes.MAP, diff --git a/tests/test_oracles_programmed.py b/tests/test_oracles_programmed.py new file mode 100644 index 00000000..df70c8ee --- /dev/null +++ b/tests/test_oracles_programmed.py @@ -0,0 +1,132 @@ +""" +Tests for programmed oracles (also known as custom check functions). 
+""" + +from pathlib import Path + +import pytest + +from tests.language_markers import ALL_LANGUAGES +from tests.manual_utils import assert_valid_output, configuration, execute_config + + +@pytest.mark.parametrize("language", ALL_LANGUAGES) +def test_custom_check_function_stdout( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, + "echo", + language, + tmp_path, + "one-programmed-correct.tson", + "correct", + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] + + +def test_custom_check_function_stdout_wrong_result( + tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, "echo", "python", tmp_path, "one-programmed-wrong.tson", "correct" + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["wrong"] + + +@pytest.mark.parametrize("language", ALL_LANGUAGES) +def test_custom_check_function_return( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, + "echo-function", + language, + tmp_path, + "programmed.tson", + "correct", + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] + assert len(updates.find_all("append-message")) + + +def test_custom_check_function_runtime_crash( + tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, + "echo-function", + "python", + tmp_path, + "programmed_crash.yaml", + "correct", + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["internal error"] + assert len(updates.find_all("append-message")) == 4 + + +def test_custom_check_function_syntax_error( + tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, + "echo-function", + "python", + tmp_path, + "programmed_syntax_error.yaml", + "correct", + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["internal error"] + assert len(updates.find_all("append-message")) == 4 + + +def test_missing_custom_check_function(tmp_path: Path, pytestconfig: pytest.Config): + conf = configuration( + pytestconfig, + "echo-function", + "python", + tmp_path, + "programmed_missing.yaml", + "correct", + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["internal error"] + assert len(updates.find_all("append-message")) == 4 + + +@pytest.mark.parametrize("lang", ["python", "java", "kotlin", "javascript", "csharp"]) +def test_custom_check_function_lotto_correct( + lang: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, "lotto", lang, tmp_path, "one-programmed-python.tson", "correct" + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert len(updates.find_all("start-testcase")) == 1 + assert updates.find_status_enum() == ["correct"] + + +@pytest.mark.parametrize("lang", ["python", "java", "kotlin", "javascript", "csharp"]) +def test_custom_check_function_lotto_wrong( + lang: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, "lotto", lang, tmp_path, "one-programmed-python.tson", "wrong" + ) + result = execute_config(conf) + updates = 
assert_valid_output(result, pytestconfig) + assert len(updates.find_all("start-testcase")) == 1 + assert updates.find_status_enum() == ["wrong"] + assert len(updates.find_all("append-message")) == 1 diff --git a/tests/test_oracles_specific.py b/tests/test_oracles_specific.py new file mode 100644 index 00000000..acb2fac5 --- /dev/null +++ b/tests/test_oracles_specific.py @@ -0,0 +1,94 @@ +""" +Testcases for language-specific oracles. +""" + +from pathlib import Path + +import pytest + +from tests.language_markers import ALL_SPECIFIC_LANGUAGES, EXCEPTION_LANGUAGES +from tests.manual_utils import assert_valid_output, configuration, execute_config + + +@pytest.mark.parametrize("language", ALL_SPECIFIC_LANGUAGES) +def test_specific_oracle_return( + language: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, + "echo-function", + language, + tmp_path, + "two-specific.tson", + "correct", + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["wrong", "correct"] + assert len(updates.find_all("append-message")) == 2 + + +@pytest.mark.parametrize("lang", EXCEPTION_LANGUAGES) +def test_specific_oracle_exception_correct( + lang: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, "division", lang, tmp_path, "plan.json", "correct" + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["correct"] + + +@pytest.mark.parametrize("lang", EXCEPTION_LANGUAGES) +def test_specific_oracle_exception_wrong( + lang: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration(pytestconfig, "division", lang, tmp_path, "plan.json", "wrong") + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["wrong"] + assert len(updates.find_all("append-message")) == 1 + + +@pytest.mark.parametrize("lang", EXCEPTION_LANGUAGES) +def test_specific_oracle_exception_wrong_exception( + lang: str, tmp_path: Path, pytestconfig +): + conf = configuration( + pytestconfig, "division", lang, tmp_path, "plan.json", "wrong-error" + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["wrong"] + + +@pytest.mark.parametrize("lang", EXCEPTION_LANGUAGES) +def test_specific_oracle_exception_syntax_error( + lang: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, "division", lang, tmp_path, "plan-syntax-error.json", "correct" + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["compilation error"] + assert len(updates.find_all("append-message")) == 1 + + +@pytest.mark.parametrize("lang", EXCEPTION_LANGUAGES) +def test_specific_oracle_exception_runtime_exception( + lang: str, tmp_path: Path, pytestconfig: pytest.Config +): + conf = configuration( + pytestconfig, + "division", + lang, + tmp_path, + "plan-runtime-exception.json", + "correct", + ) + result = execute_config(conf) + updates = assert_valid_output(result, pytestconfig) + assert updates.find_status_enum() == ["wrong", "wrong"] + assert len(updates.find_all("append-message")) >= 1 diff --git a/tests/test_slow.py b/tests/test_parallel_execution.py similarity index 72% rename from tests/test_slow.py rename to tests/test_parallel_execution.py index 1587a375..50f40a2b 100644 --- 
a/tests/test_slow.py +++ b/tests/test_parallel_execution.py @@ -1,17 +1,17 @@ """ -Tests where full exercises are run. These are inherently slower. +Test full exercises with the parallel option. """ from pathlib import Path import pytest +from tests.language_markers import ALL_LANGUAGES from tests.manual_utils import assert_valid_output, configuration, execute_config -@pytest.mark.slow @pytest.mark.parametrize("lang", ["python", "java", "kotlin"]) -def test_full_isbn(lang: str, tmp_path: Path, pytestconfig): +def test_parallel_isbn(lang: str, tmp_path: Path, pytestconfig: pytest.Config): config_ = {"options": {"parallel": True}} conf = configuration( pytestconfig, "isbn", lang, tmp_path, "full.tson", "solution", options=config_ @@ -22,18 +22,17 @@ def test_full_isbn(lang: str, tmp_path: Path, pytestconfig): assert updates.find_status_enum() == ["correct"] * 100 -@pytest.mark.slow @pytest.mark.parametrize( "lang", [ "java", "python", "kotlin", - pytest.param("haskell", marks=pytest.mark.haskell), - pytest.param("runhaskell", marks=pytest.mark.haskell), + "haskell", + "runhaskell", ], ) -def test_full_isbn_list(lang: str, tmp_path: Path, pytestconfig): +def test_parallel_isbn_list(lang: str, tmp_path: Path, pytestconfig: pytest.Config): config_ = {"options": {"parallel": True}} conf = configuration( pytestconfig, @@ -50,9 +49,8 @@ def test_full_isbn_list(lang: str, tmp_path: Path, pytestconfig): assert updates.find_status_enum() == ["correct"] * 100 -@pytest.mark.slow @pytest.mark.parametrize("lang", ["java", "python", "kotlin"]) -def test_full_lotto(lang: str, tmp_path: Path, pytestconfig): +def test_parallel_lotto(lang: str, tmp_path: Path, pytestconfig: pytest.Config): config_ = {"options": {"parallel": True}} conf = configuration( pytestconfig, "lotto", lang, tmp_path, "plan.tson", "correct", options=config_ @@ -63,20 +61,8 @@ def test_full_lotto(lang: str, tmp_path: Path, pytestconfig): assert updates.find_status_enum() == ["correct"] * 45 -@pytest.mark.slow -@pytest.mark.parametrize( - "lang", - [ - "java", - "python", - "c", - "javascript", - "kotlin", - "bash", - pytest.param("haskell", marks=pytest.mark.haskell), - ], -) -def test_full_echo(lang: str, tmp_path: Path, pytestconfig): +@pytest.mark.parametrize("lang", ALL_LANGUAGES) +def test_parallel_echo(lang: str, tmp_path: Path, pytestconfig: pytest.Config): config_ = {"options": {"parallel": True}} conf = configuration( pytestconfig, "echo", lang, tmp_path, "full.tson", "correct", options=config_ diff --git a/tests/test_serialisation.py b/tests/test_serialisation.py index eee6bbf8..bf86a12c 100644 --- a/tests/test_serialisation.py +++ b/tests/test_serialisation.py @@ -225,7 +225,7 @@ def assert_serialisation(bundle: Bundle, expected: Value): @pytest.mark.parametrize("language", LANGUAGES) -def test_basic_types(language, tmp_path: Path, pytestconfig): +def test_basic_types(language, tmp_path: Path, pytestconfig: pytest.Config): conf = configuration(pytestconfig, "", language, tmp_path) plan = Suite() bundle = create_bundle(conf, sys.stdout, plan) @@ -250,7 +250,7 @@ def test_basic_types(language, tmp_path: Path, pytestconfig): @pytest.mark.parametrize("language", LANGUAGES) -def test_advanced_types(language, tmp_path: Path, pytestconfig): +def test_advanced_types(language, tmp_path: Path, pytestconfig: pytest.Config): conf = configuration(pytestconfig, "", language, tmp_path) plan = Suite() bundle = create_bundle(conf, sys.stdout, plan) @@ -276,7 +276,7 @@ def test_advanced_types(language, tmp_path: Path, pytestconfig): 
@pytest.mark.parametrize("language", LANGUAGES) -def test_escape_double(language, tmp_path: Path, pytestconfig): +def test_escape_double(language, tmp_path: Path, pytestconfig: pytest.Config): conf = configuration(pytestconfig, "", language, tmp_path) plan = Suite() bundle = create_bundle(conf, sys.stdout, plan) @@ -284,7 +284,7 @@ def test_escape_double(language, tmp_path: Path, pytestconfig): @pytest.mark.parametrize("language", LANGUAGES) -def test_escape_single(language, tmp_path: Path, pytestconfig): +def test_escape_single(language, tmp_path: Path, pytestconfig: pytest.Config): conf = configuration(pytestconfig, "", language, tmp_path) plan = Suite() bundle = create_bundle(conf, sys.stdout, plan) @@ -292,7 +292,7 @@ def test_escape_single(language, tmp_path: Path, pytestconfig): @pytest.mark.parametrize("language", LANGUAGES) -def test_special_numbers(language, tmp_path: Path, pytestconfig): +def test_special_numbers(language, tmp_path: Path, pytestconfig: pytest.Config): conf = configuration(pytestconfig, "", language, tmp_path) plan = Suite() bundle = create_bundle(conf, sys.stdout, plan) @@ -332,7 +332,7 @@ def test_special_numbers(language, tmp_path: Path, pytestconfig): @pytest.mark.parametrize("language", LANGUAGES) -def test_valid_type_map(language: str, tmp_path: Path, pytestconfig): +def test_valid_type_map(language: str, tmp_path: Path, pytestconfig: pytest.Config): # Get a type map. conf = configuration(pytestconfig, "", language, tmp_path) plan = Suite() diff --git a/tests/test_stacktrace_cleaners.py b/tests/test_stacktrace_cleaners.py index bc087c2a..8b284c76 100644 --- a/tests/test_stacktrace_cleaners.py +++ b/tests/test_stacktrace_cleaners.py @@ -3,7 +3,8 @@ import pytest from tested.configs import DodonaConfig, GlobalConfig -from tested.languages import LANGUAGES, Language +from tested.languages import Language +from tested.languages import get_language as upstream_get_language # noinspection PyProtectedMember from tested.languages.utils import ( @@ -11,6 +12,7 @@ _replace_code_line_number, ) from tested.testsuite import Suite, SupportedLanguage +from tests.language_markers import ALL_LANGUAGES def get_language(workdir: str, language: str) -> Language: @@ -30,7 +32,7 @@ def get_language(workdir: str, language: str) -> Language: context_separator_secret="", suite=Suite(tabs=[]), ) - return LANGUAGES[language](global_config) + return upstream_get_language(global_config, language) def test_javascript_assertion_error(): @@ -73,7 +75,7 @@ def test_javascript_type_error(): assert actual_cleaned == expected_cleaned -@pytest.mark.parametrize("language", LANGUAGES.keys()) +@pytest.mark.parametrize("language", ALL_LANGUAGES) def test_empty_stacktrace(language): workdir = "/home/bliep/bloep/universal-judge/workdir" language_config = get_language(workdir, language) diff --git a/tests/test_suite.py b/tests/test_suite.py index 7c61d485..d194691b 100644 --- a/tests/test_suite.py +++ b/tests/test_suite.py @@ -1,3 +1,8 @@ +""" +Tests for the test suites, mainly to check backwards compatibility. +If making a breaking change, add a test here to ensure it doesn't break later. 
+""" + from tested.parsing import get_converter from tested.testsuite import ( CustomCheckOracle, @@ -5,6 +10,7 @@ ExitCodeOutputChannel, FileOutputChannel, MainInput, + TextData, TextOutputChannel, ValueOutputChannel, ) @@ -82,6 +88,7 @@ def test_input_deprecated_attribute_is_accepted(): } """ result = get_converter().loads(scheme, MainInput) + assert isinstance(result.stdin, TextData) assert result.stdin.data == "input-1" @@ -121,6 +128,7 @@ def test_value_show_expected_is_accepted(): } """ result = get_converter().loads(scheme, ValueOutputChannel) + assert result.value assert result.value.data == "yes" @@ -134,6 +142,7 @@ def test_exception_show_expected_is_accepted(): } """ result = get_converter().loads(scheme, ExceptionOutputChannel) + assert result.exception assert result.exception.message == "text" diff --git a/tests/test_utils.py b/tests/test_utils.py index 0c5870b7..a72ba04b 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,13 +1,13 @@ import json from pathlib import Path +import pytest import yaml from tested.datatypes import ( AdvancedNothingTypes, AdvancedSequenceTypes, BasicNumericTypes, - BasicSequenceTypes, ) from tested.serialisation import NothingType, NumberType, SequenceType from tested.utils import sorted_no_duplicates, sorting_value_extract @@ -38,8 +38,9 @@ def test_javascript_ast_parse(): output = run_command( demo_file.parent, timeout=None, - command=["node", parse_file, demo_file.absolute()], + command=["node", str(parse_file), str(demo_file.absolute())], ) + assert output namings = frozenset(output.stdout.strip().split(", ")) assert namings == expected @@ -263,7 +264,7 @@ def recursive_iter_dir(directory: Path) -> list[Path]: assert True -def test_invalid_utf8_output_is_caught(tmp_path: Path, pytestconfig): +def test_invalid_utf8_output_is_caught(tmp_path: Path, pytestconfig: pytest.Config): conf = configuration( pytestconfig, "sum", "bash", tmp_path, "short.tson", "non-utf8-output" ) diff --git a/tests/tested-draft7.json b/tests/tested-draft7.json new file mode 100644 index 00000000..1e49ec74 --- /dev/null +++ b/tests/tested-draft7.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string", + "oracle", + "expression" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$comment": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "readOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + 
"exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": true + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "if": {"$ref": "#"}, + "then": {"$ref": "#"}, + "else": {"$ref": "#"}, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": true +}