code.npt
{
"source": {
"author": {
"name": "Christian Lawson-Perfect"
}
},
"name": "Code",
"short_name": "mark-code-3",
"description": "<p>Mark code provided by the student by running it and a series of validation and marking tests.</p>\n<p>The validation tests are used to reject an answer if the student has misunderstood the task, for example if they haven't defined a required variable or function.</p>\n<p>Marking tests check properties of the student's code. Each test awards a proportion of the available credit if it is passed.</p>\n<p>You can optionally show the student the STDOUT and/or STDERR when running their code.</p>\n<p>You can give a preamble and postamble which are run before and after the student's code, and also modify the student's code before running it.</p>",
"help_url": "",
"input_widget": "code-editor",
"input_options": {
"correctAnswer": "if(settings[\"correct_answer_subvars\"],\n render(settings[\"correct_answer\"])\n,\n settings[\"correct_answer\"]\n)",
"hint": {
"static": false,
"value": "\"Write \"+capitalise(language_synonym(settings[\"code_language\"]))+\" code\""
},
"language": {
"static": false,
"value": "language_synonym(settings[\"code_language\"])"
},
"placeholder": {
"static": false,
"value": "if(settings[\"correct_answer_subvars\"],\n render(settings[\"placeholder\"])\n,\n settings[\"placeholder\"]\n)"
},
"theme": {
"static": true,
"value": "textmate"
}
},
"can_be_gap": true,
"can_be_step": true,
"marking_script": "mark:\napply(main_error);\napply(show_images);\napply(matplotlib_feedback);\napply(postamble_feedback);\napply(validation_test_feedback);\napply(marking_test_feedback)\n\ninterpreted_answer:\nstudentAnswer\n\nmain_result:\ncode_result[3]\n\nmarking_results:\ncode_result[6..(len(settings[\"tests\"])+6)]\n\nvalidation_results:\ncode_result[(len(settings[\"tests\"])+6)..len(code_result)]\n\nmain_error:\nassert(main_stdout=\"\" or not settings[\"show_stdout\"],\n feedback(\"Your code produced this output:<pre>\"+main_stdout+\"</pre>\")\n);\nassert(main_result[\"success\"],\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in your code: <pre>{main_result[\"stderr\"]}</pre>\"\"\")\n ,\n fail(\"There was an error in your code.\")\n )\n)\n\nmarking_test_feedback:\nmap(\n let(\n [name,weight,code], test,\n header, \"<strong>Test: {name}</strong> \",\n if(r[\"success\"],\n let(\n result, r[\"result\"],\n max_credit, weight/total_weight,\n credit, if(result isa \"number\", result, award(1,result)),\n switch(\n credit=0, negative_feedback(header+\"was not passed.\"),\n credit=1, add_credit(max_credit, header+\"was passed.\"),\n add_credit(credit*max_credit, header+\"was partially passed.\")\n )\n )\n ,\n if(settings[\"show_marking_errors\"],\n negative_feedback(\"\"\"There was an error: <pre>{r[\"stderr\"]}</pre>\"\"\")\n ,\n negative_feedback(header+\"was not passed.\")\n )\n )\n ),\n [test,r],\n zip(settings[\"tests\"],marking_results)\n)\n\nvalidation_test_feedback:\nmap(\n let([name,code], test,\n if(r[\"success\"],\n if(r[\"result\"],\n true\n ,\n fail(\"\"\"Your code failed the test <em>{name}</em>.\"\"\");false\n )\n ,\n fail(\"\"\"There was an error running the test <em>{name}</em>: <pre>{r[\"stderr\"]}</pre>\"\"\")\n )\n ),\n [test,r],\n zip(settings[\"validation_tests\"],validation_results)\n)\n\ntotal_weight:\nsum(map(weight,[name,weight,code],settings[\"tests\"]))\n\npre_submit:\n[run_code(code_language,\n [\n matplotlib_preamble,\n variables_as_code(language_synonym(code_language), settings[\"variables\"]),\n render(settings[\"preamble\"]),\n if(trim(settings[\"modifier\"])=\"\", studentAnswer, eval(expression(settings[\"modifier\"]))),\n render(settings[\"postamble\"]),\n matplotlib_postamble\n ]\n +map(code,[name,marks,code],settings[\"tests\"])\n +map(code,[name,code],settings[\"validation_tests\"])\n)]\n\ncode_result:\npre_submit[\"code_result\"]\n\nmain_stdout:\nsafe(main_result[\"stdout\"])\n\ncode_language:\nsettings[\"code_language\"]\n\npreamble_result:\ncode_result[2]\n\npreamble_stderr:\npreamble_result[\"stderr\"]\n\npostamble_result:\ncode_result[4]\n\npostamble_stderr:\npostamble_result[\"stderr\"]\n\npostamble_feedback:\nassert(postamble_result[\"stdout\"]=\"\",\n feedback(postamble_result[\"stdout\"])\n);\nassert(postamble_result[\"success\"],\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in the marking routine postamble: <pre>{postamble_result[\"stderr\"]}</pre>\"\"\")\n ,\n fail(\"There was an error in the marking routine postamble.\")\n )\n)\n\nmatplotlib_preamble:\nif(code_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n plt.clf() \n\"\"\"),\n \"\"\n)\n\nmatplotlib_postamble:\nif(code_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n fig = plt.gcf()\n if fig.get_axes():\n fig.savefig(sys.stdout, format='svg')\n\"\"\"),\n 
\"\"\n)\n\nmatplotlib_result:\ncode_result[5]\n\nmatplotlib_feedback:\nassert(matplotlib_result[\"stdout\"]=\"\",\n feedback(matplotlib_result[\"stdout\"])\n)\n\nimages:\nflatten(map(\n get(r,\"images\",[]),\n r,\n code_result\n))\n\nshow_images:\nassert(len(images)=0 or not settings[\"show_stdout\"],\n feedback(\"Your code produced the following {pluralise(len(images),'image','images')}:\");\n map(\n feedback(html(x)),\n x,\n images\n )\n)",
"marking_notes": [
{
"name": "mark",
"description": "This is the main marking note. It should award credit and provide feedback based on the student's answer.",
"definition": "apply(main_error);\napply(show_images);\napply(matplotlib_feedback);\napply(postamble_feedback);\napply(validation_test_feedback);\napply(marking_test_feedback)"
},
{
"name": "interpreted_answer",
"description": "A value representing the student's answer to this part.",
"definition": "studentAnswer"
},
{
"name": "main_result",
"description": "<p>The result of running the student's code and the preamble, without any tests.</p>\n<p>Normally used to detect errors in the student's code.</p>",
"definition": "code_result[3]"
},
{
"name": "marking_results",
"description": "<p>The results of running the marking tests.</p>",
"definition": "code_result[6..(len(settings[\"tests\"])+6)]"
},
{
"name": "validation_results",
"description": "<p>The results of running the validation tests.</p>",
"definition": "code_result[(len(settings[\"tests\"])+6)..len(code_result)]"
},
{
"name": "main_error",
"description": "<p>Show STDOUT if allowed.</p>\n<p>Check the student's code runs on its own. Fail if there was an error, and show STDERR if allowed.</p>",
"definition": "assert(main_stdout=\"\" or not settings[\"show_stdout\"],\n feedback(\"Your code produced this output:<pre>\"+main_stdout+\"</pre>\")\n);\nassert(main_result[\"success\"],\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in your code: <pre>{main_result[\"stderr\"]}</pre>\"\"\")\n ,\n fail(\"There was an error in your code.\")\n )\n)"
},
{
"name": "marking_test_feedback",
"description": "<p>Feedback on the marking tests. For each test, if the test was passed then add the corresponding amount of credit. If there was an error, show the error.</p>",
"definition": "map(\n let(\n [name,weight,code], test,\n header, \"<strong>Test: {name}</strong> \",\n if(r[\"success\"],\n let(\n result, r[\"result\"],\n max_credit, weight/total_weight,\n credit, if(result isa \"number\", result, award(1,result)),\n switch(\n credit=0, negative_feedback(header+\"was not passed.\"),\n credit=1, add_credit(max_credit, header+\"was passed.\"),\n add_credit(credit*max_credit, header+\"was partially passed.\")\n )\n )\n ,\n if(settings[\"show_marking_errors\"],\n negative_feedback(\"\"\"There was an error: <pre>{r[\"stderr\"]}</pre>\"\"\")\n ,\n negative_feedback(header+\"was not passed.\")\n )\n )\n ),\n [test,r],\n zip(settings[\"tests\"],marking_results)\n)"
},
{
"name": "validation_test_feedback",
"description": "<p>Give feedback on the validation tests. If any of them are not passed, the student's answer is invalid.</p>",
"definition": "map(\n let([name,code], test,\n if(r[\"success\"],\n if(r[\"result\"],\n true\n ,\n fail(\"\"\"Your code failed the test <em>{name}</em>.\"\"\");false\n )\n ,\n fail(\"\"\"There was an error running the test <em>{name}</em>: <pre>{r[\"stderr\"]}</pre>\"\"\")\n )\n ),\n [test,r],\n zip(settings[\"validation_tests\"],validation_results)\n)"
},
{
"name": "total_weight",
"description": "<p>The sum of the weights of the marking tests. Each test's weight is divided by this to produce a proportion of the available credit.</p>",
"definition": "sum(map(weight,[name,weight,code],settings[\"tests\"]))"
},
{
"name": "pre_submit",
"description": "<p>The code blocks to run.</p>\n<p>In order, they are:</p>\n<ul>\n<li>0 - The matplotlib hack preamble (in Python, make sure matplotlib plots are displayed)</li>\n<li>1 - Set passed-in variables</li>\n<li>2 - Preamble</li>\n<li>3 - The student's code</li>\n<li>4 - Postamble</li>\n<li>5 - The matplotlib hack postamble</li>\n<li>Marking tests</li>\n<li>Validation tests</li>\n</ul>",
"definition": "[run_code(code_language,\n [\n matplotlib_preamble,\n variables_as_code(language_synonym(code_language), settings[\"variables\"]),\n render(settings[\"preamble\"]),\n if(trim(settings[\"modifier\"])=\"\", studentAnswer, eval(expression(settings[\"modifier\"]))),\n render(settings[\"postamble\"]),\n matplotlib_postamble\n ]\n +map(code,[name,marks,code],settings[\"tests\"])\n +map(code,[name,code],settings[\"validation_tests\"])\n)]"
},
{
"name": "code_result",
"description": "<p>The results of the code blocks: a list with an entry corresponding to each block of code.</p>",
"definition": "pre_submit[\"code_result\"]"
},
{
"name": "main_stdout",
"description": "<p>The stdout from the student's code.</p>",
"definition": "safe(main_result[\"stdout\"])"
},
{
"name": "code_language",
"description": "<p>The language the code is written in. Either \"pyodide\" (Python) or \"webr\" (R)</p>",
"definition": "settings[\"code_language\"]"
},
{
"name": "preamble_result",
"description": "<p>The result of running the preamble block.</p>",
"definition": "code_result[2]"
},
{
"name": "preamble_stderr",
"description": "<p>The STDERR produced by the preamble block.</p>",
"definition": "preamble_result[\"stderr\"]"
},
{
"name": "postamble_result",
"description": "<p>The result of running the postamble.</p>",
"definition": "code_result[4]"
},
{
"name": "postamble_stderr",
"description": "<p>The STDERR produced by the postamble block.</p>",
"definition": "postamble_result[\"stderr\"]"
},
{
"name": "postamble_feedback",
"description": "<p>Show the STDOUT from the postamble, if there is any.</p>",
"definition": "assert(postamble_result[\"stdout\"]=\"\",\n feedback(postamble_result[\"stdout\"])\n);\nassert(postamble_result[\"success\"],\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in the marking routine postamble: <pre>{postamble_result[\"stderr\"]}</pre>\"\"\")\n ,\n fail(\"There was an error in the marking routine postamble.\")\n )\n)"
},
{
"name": "matplotlib_preamble",
"description": "<p>Preamble for a hack to ensure that figures produced by matplotlib in Python are displayed.</p>\n<p>This code clears the matplotlib output, if matplotlib has been loaded.</p>",
"definition": "if(code_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n plt.clf() \n\"\"\"),\n \"\"\n)"
},
{
"name": "matplotlib_postamble",
"description": "<p>A hack to show any figures produced with matplotlib in the stdout.</p>",
"definition": "if(code_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n fig = plt.gcf()\n if fig.get_axes():\n fig.savefig(sys.stdout, format='svg')\n\"\"\"),\n \"\"\n)"
},
{
"name": "matplotlib_result",
"description": "<p>The result of running the matplotlib hack.</p>",
"definition": "code_result[5]"
},
{
"name": "matplotlib_feedback",
"description": "<p>Feedback from the matplotlib hack: if a figure is produced, it's displayed as SVG here.</p>",
"definition": "assert(matplotlib_result[\"stdout\"]=\"\",\n feedback(matplotlib_result[\"stdout\"])\n)"
},
{
"name": "images",
"description": "<p>Any images produced by the code blocks.</p>",
"definition": "flatten(map(\n get(r,\"images\",[]),\n r,\n code_result\n))"
},
{
"name": "show_images",
"description": "<p>Show the images produced by the code.</p>",
"definition": "assert(len(images)=0 or not settings[\"show_stdout\"],\n feedback(\"Your code produced the following {pluralise(len(images),'image','images')}:\");\n map(\n feedback(html(x)),\n x,\n images\n )\n)"
}
],
"settings": [
{
"name": "show_input_hint",
"label": "Show the input hint?",
"help_url": "",
"hint": "",
"input_type": "checkbox",
"default_value": true
},
{
"name": "code_language",
"label": "Code language",
"help_url": "",
"hint": "The language that the student's code will be written in.",
"input_type": "dropdown",
"default_value": "pyodide",
"choices": [
{
"value": "pyodide",
"label": "Python"
},
{
"value": "webr",
"label": "R"
}
]
},
{
"name": "correct_answer",
"label": "Correct answer",
"help_url": "",
"hint": "A correct answer to the part.",
"input_type": "code",
"default_value": "",
"evaluate": false
},
{
"name": "correct_answer_subvars",
"label": "Substitute question variables into the correct answer and placeholder?",
"help_url": "",
"hint": "If ticked, then JME expressions between curly braces will be evaluated and substituted into the correct answer and the placeholder.<br /><br />If not ticked, then the correct answer and placeholder will be displayed exactly as they are.",
"input_type": "checkbox",
"default_value": true
},
{
"name": "show_stdout",
"label": "Show stdout?",
"help_url": "",
"hint": "If ticked, the STDOUT produced after running the student's code will be shown in the feedback.",
"input_type": "checkbox",
"default_value": true
},
{
"name": "show_stderr",
"label": "Show stderr?",
"help_url": "",
"hint": "If ticked, the STDERR produced after running the student's code will be shown in the feedback.",
"input_type": "checkbox",
"default_value": true
},
{
"name": "show_marking_errors",
"label": "Show errors produced by marking tests?",
"help_url": "",
"hint": "",
"input_type": "checkbox",
"default_value": false
},
{
"name": "placeholder",
"label": "Placeholder",
"help_url": "",
"hint": "Initial text for the code editor. Question variables are not substituted into this text.",
"input_type": "code",
"default_value": "",
"evaluate": false
},
{
"name": "modifier",
"label": "Student code modifier",
"help_url": "",
"hint": "JME expression to modify the student's submitted code before being passed to the marking template. The student's code is available as the string variable <code>studentAnswer</code>.",
"input_type": "code",
"default_value": "",
"evaluate": false
},
{
"name": "preamble",
"label": "Preamble",
"help_url": "",
"hint": "This code is run before the student's code. Define anything that the student's code or your tests need. Expressions between curly braces are interpreted as variable substitution.",
"input_type": "code",
"default_value": "",
"evaluate": false
},
{
"name": "postamble",
"label": "Postamble",
"help_url": "",
"hint": "This code is run after the student's code but before the validation and unit tests. Expressions between curly braces are interpreted as variable substitution.",
"input_type": "code",
"default_value": "",
"evaluate": false
},
{
"name": "tests",
"label": "Marking tests",
"help_url": "",
"hint": "A list of tests used to mark the student's answer.<br />Each item is a list with three values:<br />\n<ul>\n<li>The name of the test, shown to the student above any feedback.</li>\n<li>The proportion of the credit available for the part to award if this test is passed.</li>\n<li>The code to run.</li>\n</ul>",
"input_type": "code",
"default_value": "[\n [\"Test 1\", 1, \"True\"]\n]",
"evaluate": true
},
{
"name": "validation_tests",
"label": "Validation tests",
"help_url": "",
"hint": "<p>A list of tests used to validate that the student's code is acceptable.<br />Each item is a list with two string values:</p>\n<ul>\n<li>A name which will be shown to the student if the test fails.</li>\n<li>The code to run.</li>\n</ul>",
"input_type": "code",
"default_value": "[\n [\"arithmetic works\", \"1+1 == 2\"]\n]",
"evaluate": true
},
{
"name": "variables",
"label": "Variables to include in code",
"help_url": "",
"hint": "Give a dictionary mapping variable names in the target language to their values, as JME expressions. These variables will be available in the code that is run. Use this to make the value of a Numbas question variable available in code.",
"input_type": "code",
"default_value": "dict()",
"evaluate": true
}
],
"public_availability": "always",
"published": true,
"extensions": [
"programming"
]
}
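
Example settings (a minimal sketch, not part of the definition above): the following shows how the "tests", "validation_tests" and "variables" settings might be filled in for a Python ("pyodide") question that asks the student to define a function square(n). The names square and n are hypothetical. Each marking test evaluates to a Python boolean, or a number between 0 and 1 for partial credit; per the marking_test_feedback note, its weight is divided by total_weight to give the proportion of the part's credit it awards.

    tests:
    [
      [ "square(4) returns 16", 0.5, "square(4) == 16" ],
      [ "square works on negative numbers", 0.5, "square(-3) == 9" ]
    ]

    validation_tests:
    [
      [ "you have defined a function called square", "callable(globals().get('square'))" ]
    ]

    variables (assuming JME's dictionary literal syntax):
    [ "n": 5 ]

With these settings, pre_submit runs nine code blocks, so code_result has nine entries: indices 0-5 are the fixed blocks listed in the pre_submit note, indices 6 and 7 are the two marking tests, and index 8 is the validation test.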