fix: a star in docs
logic/autograder.py (new file, 434 lines)
@@ -0,0 +1,434 @@
# autograder.py
# -------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).


# imports from python standard library
from __future__ import print_function
import grading
import importlib.util
import optparse
import os
import re
import sys
import projectParams
import random
random.seed(0)
try:
    from pacman import GameState
except ImportError:
    # pacman.py is not part of every project; ignore it if it is unavailable
    pass

# register arguments and set default values
def readCommand(argv):
    parser = optparse.OptionParser(description='Run public tests on student code')
    parser.set_defaults(generateSolutions=False, edxOutput=False, gsOutput=False,
                        muteOutput=False, printTestCase=False, noGraphics=False)
    # BEGIN SOLUTION NO PROMPT
    parser.set_defaults(generatePublicTests=False)
    # END SOLUTION NO PROMPT
    parser.add_option('--test-directory',
                      dest='testRoot',
                      default='test_cases',
                      help='Root test directory which contains subdirectories corresponding to each question')
    parser.add_option('--student-code',
                      dest='studentCode',
                      default=projectParams.STUDENT_CODE_DEFAULT,
                      help='comma separated list of student code files')
    parser.add_option('--code-directory',
                      dest='codeRoot',
                      default="",
                      help='Root directory containing the student and testClass code')
    parser.add_option('--test-case-code',
                      dest='testCaseCode',
                      default=projectParams.PROJECT_TEST_CLASSES,
                      help='class containing testClass classes for this project')
    parser.add_option('--generate-solutions',
                      dest='generateSolutions',
                      action='store_true',
                      help='Write solutions generated to .solution file')
    parser.add_option('--edx-output',
                      dest='edxOutput',
                      action='store_true',
                      help='Generate edX output files')
    parser.add_option('--gradescope-output',
                      dest='gsOutput',
                      action='store_true',
                      help='Generate GradeScope output files')
    parser.add_option('--mute',
                      dest='muteOutput',
                      action='store_true',
                      help='Mute output from executing tests')
    parser.add_option('--print-tests', '-p',
                      dest='printTestCase',
                      action='store_true',
                      help='Print each test case before running them.')
    parser.add_option('--test', '-t',
                      dest='runTest',
                      default=None,
                      help='Run one particular test. Relative to test root.')
    parser.add_option('--question', '-q',
                      dest='gradeQuestion',
                      default=None,
                      help='Grade one particular question.')
    parser.add_option('--no-graphics',
                      dest='noGraphics',
                      action='store_true',
                      help='No graphics display for pacman games.')
    # BEGIN SOLUTION NO PROMPT
    parser.add_option('--generate-public-tests',
                      dest='generatePublicTests',
                      action='store_true',
                      help='Generate ./test_cases/* from ./private_test_cases/*')
    # END SOLUTION NO PROMPT
    (options, args) = parser.parse_args(argv)
    return options
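
# For quick reference, a few typical invocations using the options above.
# Question and test names here are illustrative and depend on the particular
# project's test_cases/ layout, so adjust them to the project at hand:
#   python autograder.py                                  # grade all questions
#   python autograder.py -q q2                            # grade one question (and its dependencies)
#   python autograder.py -t test_cases/q2/0-small-tree    # run a single test case
#   python autograder.py --no-graphics --mute             # grade without graphics or console output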


# confirm we should author solution files
def confirmGenerate():
    print('WARNING: this action will overwrite any solution files.')
    print('Are you sure you want to proceed? (yes/no)')
    while True:
        ans = sys.stdin.readline().strip()
        if ans == 'yes':
            break
        elif ans == 'no':
            sys.exit(0)
        else:
            print('please answer either "yes" or "no"')


# TODO: Fix this so that tracebacks work correctly.
# Looking at the source of the traceback module, and presuming it works
# the same way as the interpreter's, it uses co_filename. This is,
# however, a read-only attribute.
def setModuleName(module, filename):
    functionType = type(confirmGenerate)
    classType = type(optparse.Option)

    for i in dir(module):
        o = getattr(module, i)
        if hasattr(o, '__file__'):
            continue

        if type(o) == functionType:
            setattr(o, '__file__', filename)
        elif type(o) == classType:
            setattr(o, '__file__', filename)
            # TODO: assign member __file__'s?
        # print(i, type(o))


# from cStringIO import StringIO

# def loadModuleString(moduleSource):
#     # Below broken, imp doesn't believe it's being passed a file:
#     #   ValueError: load_module arg#2 should be a file or None
#     #
#     # f = StringIO(moduleCodeDict[k])
#     # tmp = imp.load_module(k, f, k, (".py", "r", imp.PY_SOURCE))
#     tmp = imp.new_module(k)
#     exec(moduleCodeDict[k], tmp.__dict__)
#     setModuleName(tmp, k)
#     return tmp

import py_compile

def loadModuleFile(moduleName, filePath):
    # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
    spec = importlib.util.spec_from_file_location(moduleName, filePath)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
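
# Note: this importlib-based loader replaces the imp-based loadModuleString
# sketch commented out above; imp has been deprecated since Python 3.4 and was
# removed in Python 3.12, so importlib.util is the supported mechanism here.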


def readFile(path, root=""):
    "Read file from disk at specified path and return as string"
    with open(os.path.join(root, path), 'r') as handle:
        return handle.read()


#######################################################################
# Error Hint Map
#######################################################################

# TODO: use these
ERROR_HINT_MAP = {
    'q1': {
        "<type 'exceptions.IndexError'>": """
        We noticed that your project threw an IndexError on q1.
        While many things may cause this, it may have been from
        assuming a certain number of successors from a state space
        or assuming a certain number of actions available from a given
        state. Try making your code more general (no hardcoded indices)
        and submit again!
        """
    },
    'q3': {
        "<type 'exceptions.AttributeError'>": """
        We noticed that your project threw an AttributeError on q3.
        While many things may cause this, it may have been from assuming
        a certain size or structure to the state space. For example, if you have
        a line of code assuming that the state is (x, y) and we run your code
        on a state space with (x, y, z), this error could be thrown. Try
        making your code more general and submit again!

        """
    }
}

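# Note: the keys above use Python 2's repr of exception types; under Python 3
# the equivalent strings are "<class 'IndexError'>" and
# "<class 'AttributeError'>", so the keys would likely need updating before the
# "TODO: use these" above is acted on.
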
import pprint


def splitStrings(d):
    d2 = dict(d)
    for k in d:
        if k[0:2] == "__":
            del d2[k]
            continue
        if d2[k].find("\n") >= 0:
            d2[k] = d2[k].split("\n")
    return d2


def printTest(testDict, solutionDict):
    pp = pprint.PrettyPrinter(indent=4)
    print("Test case:")
    for line in testDict["__raw_lines__"]:
        print("   |", line)
    print("Solution:")
    for line in solutionDict["__raw_lines__"]:
        print("   |", line)


def runTest(testName, moduleDict, printTestCase=False, display=None):
    import testParser
    import testClasses
    for module in moduleDict:
        setattr(sys.modules[__name__], module, moduleDict[module])

    testDict = testParser.TestParser(testName + ".test").parse()
    solutionDict = testParser.TestParser(testName + ".solution").parse()
    test_out_file = os.path.join('%s.test_output' % testName)
    testDict['test_out_file'] = test_out_file
    testClass = getattr(projectTestClasses, testDict['class'])

    questionClass = getattr(testClasses, 'Question')
    question = questionClass({'max_points': 0}, display)
    testCase = testClass(question, testDict)

    if printTestCase:
        printTest(testDict, solutionDict)

    # This is a fragile hack to create a stub grades object
    grades = grading.Grades(projectParams.PROJECT_NAME, [(None, 0)])
    testCase.execute(grades, moduleDict, solutionDict)


# returns all the tests you need to run in order to run question
def getDepends(testParser, testRoot, question):
    allDeps = [question]
    questionDict = testParser.TestParser(os.path.join(testRoot, question, 'CONFIG')).parse()
    if 'depends' in questionDict:
        depends = questionDict['depends'].split()
        for d in depends:
            # run dependencies first
            allDeps = getDepends(testParser, testRoot, d) + allDeps
    return allDeps

# get list of questions to grade
def getTestSubdirs(testParser, testRoot, questionToGrade):
    # THIS IS WHERE QUESTIONS ARE SPECIFIED
    problemDict = testParser.TestParser(os.path.join(testRoot, 'CONFIG')).parse()
    if questionToGrade != None:
        questions = getDepends(testParser, testRoot, questionToGrade)
        if len(questions) > 1:
            print('Note: due to dependencies, the following tests will be run: %s' %
                  ' '.join(questions))
        return questions
    if 'order' in problemDict:
        return problemDict['order'].split()
    return sorted(os.listdir(testRoot))


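# For reference, each question directory's CONFIG file (parsed by
# testParser.TestParser above) is a small key/value file; a hypothetical
# example, with names chosen only for illustration, might look like:
#   class: "PassAllTestsQuestion"
#   max_points: "3"
#   depends: "q1"
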
# evaluate student code
def evaluate(generateSolutions, testRoot, moduleDict, exceptionMap=ERROR_HINT_MAP,
             edxOutput=False, muteOutput=False, gsOutput=False,
             printTestCase=False, questionToGrade=None, display=None):
    # imports of testbench code. note that the testClasses import must follow
    # the import of student code due to dependencies
    import testParser
    import testClasses
    for module in moduleDict:
        setattr(sys.modules[__name__], module, moduleDict[module])

    questions = []
    questionDicts = {}
    # HERE IS WHERE QUESTIONS ARE CREATED
    test_subdirs = getTestSubdirs(testParser, testRoot, questionToGrade)
    for q in test_subdirs:
        subdir_path = os.path.join(testRoot, q)
        if not os.path.isdir(subdir_path) or q[0] == '.':
            continue

        # create a question object
        questionDict = testParser.TestParser(os.path.join(subdir_path, 'CONFIG')).parse()
        questionClass = getattr(testClasses, questionDict['class'])
        question = questionClass(questionDict, display)
        questionDicts[q] = questionDict

        # load test cases into question
        tests = [t for t in os.listdir(subdir_path) if re.match(r'[^#~.].*\.test\Z', t)]
        tests = [re.match(r'(.*)\.test\Z', t).group(1) for t in tests]
        for t in sorted(tests):
            test_file = os.path.join(subdir_path, '%s.test' % t)
            solution_file = os.path.join(subdir_path, '%s.solution' % t)
            test_out_file = os.path.join(subdir_path, '%s.test_output' % t)
            testDict = testParser.TestParser(test_file).parse()
            if testDict.get("disabled", "false").lower() == "true":
                continue
            testDict['test_out_file'] = test_out_file
            testClass = getattr(projectTestClasses, testDict['class'])
            testCase = testClass(question, testDict)

            def makefun(testCase, solution_file):
                if generateSolutions:
                    # write solution file to disk
                    return lambda grades: testCase.writeSolution(moduleDict, solution_file)
                else:
                    # read in solution dictionary and pass as an argument
                    testDict = testParser.TestParser(test_file).parse()
                    solutionDict = testParser.TestParser(solution_file).parse()
                    if printTestCase:
                        return lambda grades: printTest(testDict, solutionDict) or testCase.execute(grades, moduleDict, solutionDict)
                    else:
                        return lambda grades: testCase.execute(grades, moduleDict, solutionDict)
            question.addTestCase(testCase, makefun(testCase, solution_file))

        # Note extra function is necessary for scoping reasons
        def makefun(question):
            return lambda grades: question.execute(grades)
        setattr(sys.modules[__name__], q, makefun(question))
        questions.append((q, question.getMaxPoints()))

    grades = grading.Grades(projectParams.PROJECT_NAME, questions,
                            gsOutput=gsOutput, edxOutput=edxOutput, muteOutput=muteOutput)
    if questionToGrade == None:
        for q in questionDicts:
            for prereq in questionDicts[q].get('depends', '').split():
                grades.addPrereq(q, prereq)

    grades.grade(sys.modules[__name__], bonusPic=projectParams.BONUS_PIC)
    return grades.points


def getDisplay(graphicsByDefault, options=None):
    graphics = graphicsByDefault
    if options is not None and options.noGraphics:
        graphics = False
    if graphics:
        try:
            import graphicsDisplay
            return graphicsDisplay.PacmanGraphics(1, frameTime=.05)
        except ImportError:
            pass
    import textDisplay
    return textDisplay.NullGraphics()


# BEGIN SOLUTION NO PROMPT
import shutil

def copy(srcDir, destDir, filename):
    srcFilename = os.path.join(srcDir, filename)
    destFilename = os.path.join(destDir, filename)
    print("Copying {} -> {}".format(srcFilename, destFilename))
    shutil.copy(srcFilename, destFilename)
    # with open(os.path.join(srcDir, filename), 'r') as f1:
    #     with open(os.path.join(destDir, filename), 'w') as f2:
    #         f2.write(f1.read())

def generatePublicTests(moduleDict, privateRoot='private_test_cases', publicRoot='test_cases'):
    import testParser
    import testClasses
    for module in moduleDict:
        setattr(sys.modules[__name__], module, moduleDict[module])

    if not os.path.exists(publicRoot):
        os.mkdir(publicRoot)
    copy(privateRoot, publicRoot, 'CONFIG')
    for q in sorted(os.listdir(privateRoot)):
        private_subdir_path = os.path.join(privateRoot, q)
        public_subdir_path = os.path.join(publicRoot, q)
        if not os.path.exists(public_subdir_path):
            os.mkdir(public_subdir_path)

        if not os.path.isdir(private_subdir_path) or q[0] == '.':
            continue

        copy(private_subdir_path, public_subdir_path, 'CONFIG')

        # create a question object
        questionDict = testParser.TestParser(os.path.join(public_subdir_path, 'CONFIG')).parse()
        questionClass = getattr(testClasses, questionDict['class'])
        question = questionClass(questionDict, getDisplay(False))

        tests = list(filter(lambda t: re.match(r'[^#~.].*\.test\Z', t), os.listdir(private_subdir_path)))
        tests = list(map(lambda t: re.match(r'(.*)\.test\Z', t).group(1), tests))
        for t in sorted(tests):
            test_file = os.path.join(private_subdir_path, '%s.test' % t)
            public_test_file = os.path.join(public_subdir_path, '%s.test' % t)
            test_out_file = os.path.join(public_subdir_path, '%s.test_output' % t)
            print("Creating public test case {} from {}".format(public_test_file, test_file))

            testDict = testParser.TestParser(test_file).parse()
            if testDict.get("disabled", "false").lower() == "true":
                continue
            testDict['test_out_file'] = test_out_file
            testClass = getattr(projectTestClasses, testDict['class'])
            testCase = testClass(question, testDict)

            testCase.createPublicVersion()
            testCase.emitPublicVersion(public_test_file)
# END SOLUTION NO PROMPT



if __name__ == '__main__':
    options = readCommand(sys.argv)
    if options.generateSolutions:
        confirmGenerate()
    codePaths = options.studentCode.split(',')
    # moduleCodeDict = {}
    # for cp in codePaths:
    #     moduleName = re.match(r'.*?([^/]*)\.py', cp).group(1)
    #     moduleCodeDict[moduleName] = readFile(cp, root=options.codeRoot)
    # moduleCodeDict['projectTestClasses'] = readFile(options.testCaseCode, root=options.codeRoot)
    # moduleDict = loadModuleDict(moduleCodeDict)

    moduleDict = {}
    for cp in codePaths:
        moduleName = re.match(r'.*?([^/]*)\.py', cp).group(1)
        moduleDict[moduleName] = loadModuleFile(moduleName, os.path.join(options.codeRoot, cp))

    moduleName = re.match(r'.*?([^/]*)\.py', options.testCaseCode).group(1)
    moduleDict['projectTestClasses'] = loadModuleFile(moduleName, os.path.join(options.codeRoot, options.testCaseCode))

    # BEGIN SOLUTION NO PROMPT
    if options.generatePublicTests:
        generatePublicTests(moduleDict)
        sys.exit()
    # END SOLUTION NO PROMPT

    if options.runTest != None:
        runTest(options.runTest, moduleDict, printTestCase=options.printTestCase, display=getDisplay(True, options))
    else:
        evaluate(options.generateSolutions, options.testRoot, moduleDict,
                 gsOutput=options.gsOutput,
                 edxOutput=options.edxOutput, muteOutput=options.muteOutput, printTestCase=options.printTestCase,
                 questionToGrade=options.gradeQuestion, display=getDisplay(options.gradeQuestion != None, options))