feat: p0 and p1
This commit is contained in:
23
tutorial/addition.py
Normal file
23
tutorial/addition.py
Normal file
@ -0,0 +1,23 @@
|
||||
# addition.py
|
||||
# -----------
|
||||
# Licensing Information: You are free to use or extend these projects for
|
||||
# educational purposes provided that (1) you do not distribute or publish
|
||||
# solutions, (2) you retain this notice, and (3) you provide clear
|
||||
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
|
||||
#
|
||||
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
|
||||
# The core projects and autograders were primarily created by John DeNero
|
||||
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
|
||||
# Student side autograding was added by Brad Miller, Nick Hay, and
|
||||
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
|
||||
|
||||
|
||||
"""
|
||||
Run python autograder.py
|
||||
"""
|
||||
|
||||
|
||||
def add(a, b):
    """Return the sum of a and b.

    a, b: any values supporting the + operator (ints in the autograder tests).
    Returns a + b.
    """
    # The stub returned 0, contradicting the documented contract above.
    return a + b
|
395
tutorial/autograder.py
Normal file
395
tutorial/autograder.py
Normal file
@ -0,0 +1,395 @@
|
||||
# autograder.py
|
||||
# -------------
|
||||
# Licensing Information: You are free to use or extend these projects for
|
||||
# educational purposes provided that (1) you do not distribute or publish
|
||||
# solutions, (2) you retain this notice, and (3) you provide clear
|
||||
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
|
||||
#
|
||||
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
|
||||
# The core projects and autograders were primarily created by John DeNero
|
||||
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
|
||||
# Student side autograding was added by Brad Miller, Nick Hay, and
|
||||
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
|
||||
|
||||
|
||||
# imports from python standard library
|
||||
from __future__ import print_function
|
||||
import grading
|
||||
import importlib.util
|
||||
import optparse
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import projectParams
|
||||
import random
|
||||
random.seed(0)
|
||||
try:
|
||||
from pacman import GameState
|
||||
except:
|
||||
pass
|
||||
|
||||
# register arguments and set default values
def readCommand(argv):
    """Parse autograder command-line arguments and return the options object.

    argv: full argument vector (sys.argv); optparse skips argv[0] itself.
    Returns the optparse `options` namespace; positional args are discarded.
    """
    parser = optparse.OptionParser(description = 'Run public tests on student code')
    # Flags default to False so that absence of a switch means "off".
    parser.set_defaults(generateSolutions=False, edxOutput=False, gsOutput=False, muteOutput=False, printTestCase=False, noGraphics=False)
    parser.add_option('--test-directory',
                      dest = 'testRoot',
                      default = 'test_cases',
                      help = 'Root test directory which contains subdirectories corresponding to each question')
    parser.add_option('--student-code',
                      dest = 'studentCode',
                      default = projectParams.STUDENT_CODE_DEFAULT,
                      help = 'comma separated list of student code files')
    parser.add_option('--code-directory',
                      dest = 'codeRoot',
                      default = "",
                      help = 'Root directory containing the student and testClass code')
    parser.add_option('--test-case-code',
                      dest = 'testCaseCode',
                      default = projectParams.PROJECT_TEST_CLASSES,
                      help = 'class containing testClass classes for this project')
    parser.add_option('--generate-solutions',
                      dest = 'generateSolutions',
                      action = 'store_true',
                      help = 'Write solutions generated to .solution file')
    parser.add_option('--edx-output',
                      dest = 'edxOutput',
                      action = 'store_true',
                      help = 'Generate edX output files')
    parser.add_option('--gradescope-output',
                      dest = 'gsOutput',
                      action = 'store_true',
                      help = 'Generate GradeScope output files')
    parser.add_option('--mute',
                      dest = 'muteOutput',
                      action = 'store_true',
                      help = 'Mute output from executing tests')
    parser.add_option('--print-tests', '-p',
                      dest = 'printTestCase',
                      action = 'store_true',
                      help = 'Print each test case before running them.')
    parser.add_option('--test', '-t',
                      dest = 'runTest',
                      default = None,
                      help = 'Run one particular test.  Relative to test root.')
    parser.add_option('--question', '-q',
                      dest = 'gradeQuestion',
                      default = None,
                      help = 'Grade one particular question.')
    parser.add_option('--no-graphics',
                      dest = 'noGraphics',
                      action = 'store_true',
                      help = 'No graphics display for pacman games.')
    parser.add_option('--check-dependencies',
                      dest = 'check_dependencies',
                      action = 'store_true',
                      help = 'check that dependencies are installed')
    (options, args) = parser.parse_args(argv)
    return options
|
||||
|
||||
|
||||
# confirm we should author solution files
def confirmGenerate():
    """Interactively confirm overwriting solution files.

    Reads lines from stdin until the user types 'yes' (returns) or 'no'
    (exits the program with status 0).
    """
    print('WARNING: this action will overwrite any solution files.')
    print('Are you sure you want to proceed? (yes/no)')
    while True:
        answer = sys.stdin.readline().strip()
        if answer == 'yes':
            break
        if answer == 'no':
            sys.exit(0)
        print('please answer either "yes" or "no"')
|
||||
|
||||
|
||||
# TODO: Fix this so that it tracebacks work correctly
# Looking at source of the traceback module, presuming it works
# the same as the intepreters, it uses co_filename. This is,
# however, a readonly attribute.
def setModuleName(module, filename):
    """Tag each function and class defined in `module` with a __file__ attribute.

    module: module object to annotate
    filename: value to store in each member's __file__

    Intended to make tracebacks point at the right file (see TODO above);
    note that code objects' co_filename is read-only, so this only sets a
    plain attribute on the function/class objects themselves.
    """
    # Derive the function/class types from known exemplars rather than
    # importing `types` (confirmGenerate is a plain function; optparse.Option
    # is a plain class).
    functionType = type(confirmGenerate)
    classType = type(optparse.Option)

    for i in dir(module):
        o = getattr(module, i)
        # Skip anything that already carries a __file__ (e.g. imported modules).
        if hasattr(o, '__file__'): continue

        if type(o) == functionType:
            setattr(o, '__file__', filename)
        elif type(o) == classType:
            setattr(o, '__file__', filename)
            # TODO: assign member __file__'s?
        #print i, type(o)
|
||||
|
||||
|
||||
#from cStringIO import StringIO
|
||||
|
||||
def loadModuleString(moduleSource, moduleName='loadedModule'):
    """Build and return a module object from Python source held in a string.

    moduleSource: the module's source code as a string
    moduleName: name for the created module (new optional parameter with a
                default, so existing single-argument callers keep working)

    Returns the populated module object.

    The original body referenced the undefined global `k` and the long-removed
    `imp` module, and never executed the source at all — every call raised
    NameError.  This version uses types.ModuleType + exec instead.
    """
    # Local import: `types` replaces the unavailable `imp` module.
    import types

    module = types.ModuleType(moduleName)
    exec(moduleSource, module.__dict__)
    # Preserve the original intent of stamping members with a module name
    # for traceback purposes (see setModuleName above).
    setModuleName(module, moduleName)
    return module
|
||||
|
||||
import py_compile
|
||||
|
||||
def loadModuleFile(moduleName, filePath):
    """Import a Python source file directly and return the resulting module.

    moduleName: name to give the imported module
    filePath: path to the .py file on disk
    """
    # Recipe from:
    # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
    module_spec = importlib.util.spec_from_file_location(moduleName, filePath)
    loaded = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(loaded)
    return loaded
|
||||
|
||||
|
||||
def readFile(path, root=""):
    "Read file from disk at specified path and return as string"
    full_path = os.path.join(root, path)
    with open(full_path, 'r') as source:
        return source.read()
|
||||
|
||||
|
||||
#######################################################################
# Error Hint Map
#######################################################################

# TODO: use these
# Maps question name -> { str(type(exception)) -> hint text shown to students }.
# NOTE(review): keys use the Python 2 spelling "<type 'exceptions.X'>";
# under Python 3 str(type(e)) yields "<class 'X'>", so these entries may
# never match in grading.addErrorHints — confirm before relying on them.
ERROR_HINT_MAP = {
    'q1': {
        "<type 'exceptions.IndexError'>": """
      We noticed that your project threw an IndexError on q1.
      While many things may cause this, it may have been from
      assuming a certain number of successors from a state space
      or assuming a certain number of actions available from a given
      state. Try making your code more general (no hardcoded indices)
      and submit again!
    """
    },
    'q3': {
        "<type 'exceptions.AttributeError'>": """
        We noticed that your project threw an AttributeError on q3.
        While many things may cause this, it may have been from assuming
        a certain size or structure to the state space. For example, if you have
        a line of code assuming that the state is (x, y) and we run your code
        on a state space with (x, y, z), this error could be thrown. Try
        making your code more general and submit again!

    """
    }
}
|
||||
|
||||
import pprint
|
||||
|
||||
def splitStrings(d):
    """Return a copy of d with dunder-prefixed keys removed and any
    multi-line string value replaced by its list of lines."""
    result = dict(d)
    for key in d:
        if key.startswith("__"):
            # internal bookkeeping entries are dropped from the copy
            del result[key]
        elif result[key].find("\n") >= 0:
            result[key] = result[key].split("\n")
    return result
|
||||
|
||||
|
||||
def printTest(testDict, solutionDict):
    """Print the raw text of a test case and its solution, side by side.

    testDict, solutionDict: parsed test/solution dictionaries; only their
    '__raw_lines__' entries (the original file lines) are used.
    """
    # The original built a pprint.PrettyPrinter(indent=4) here and never
    # used it — removed as dead code.
    print("Test case:")
    for line in testDict["__raw_lines__"]:
        print(" |", line)
    print("Solution:")
    for line in solutionDict["__raw_lines__"]:
        print(" |", line)
|
||||
|
||||
|
||||
def runTest(testName, moduleDict, printTestCase=False, display=None):
    """Run a single test case and print its result via a stub Grades object.

    testName: path to the test, relative to the test root, without extension;
              '<testName>.test' and '<testName>.solution' must exist.
    moduleDict: mapping of module name -> loaded module (student code plus
                'projectTestClasses').
    printTestCase: if True, echo the raw test and solution before running.
    display: display object passed to the Question constructor.
    """
    import testParser
    import testClasses
    # Expose the student modules (and projectTestClasses) as globals of this
    # module so the test classes can find them by name.
    for module in moduleDict:
        setattr(sys.modules[__name__], module, moduleDict[module])

    testDict = testParser.TestParser(testName + ".test").parse()
    solutionDict = testParser.TestParser(testName + ".solution").parse()
    test_out_file = os.path.join('%s.test_output' % testName)
    testDict['test_out_file'] = test_out_file
    # projectTestClasses was injected by the setattr loop above.
    testClass = getattr(projectTestClasses, testDict['class'])

    # A bare Question with zero max points is enough context for one test.
    questionClass = getattr(testClasses, 'Question')
    question = questionClass({'max_points': 0}, display)
    testCase = testClass(question, testDict)

    if printTestCase:
        printTest(testDict, solutionDict)

    # This is a fragile hack to create a stub grades object
    grades = grading.Grades(projectParams.PROJECT_NAME, [(None,0)])
    testCase.execute(grades, moduleDict, solutionDict)
|
||||
|
||||
|
||||
# returns all the tests you need to run in order to run question
def getDepends(testParser, testRoot, question):
    """Return the ordered list of questions to run for `question`, with every
    (transitive) prerequisite listed before the question that needs it."""
    deps = [question]
    config = testParser.TestParser(os.path.join(testRoot, question, 'CONFIG')).parse()
    for prerequisite in config.get('depends', '').split():
        # run dependencies first
        deps = getDepends(testParser, testRoot, prerequisite) + deps
    return deps
|
||||
|
||||
# get list of questions to grade
def getTestSubdirs(testParser, testRoot, questionToGrade):
    """Return the question subdirectory names to grade.

    With a specific question requested, that question plus its prerequisites
    (in dependency order); otherwise the root CONFIG's 'order' list, falling
    back to a sorted directory listing.
    """
    config = testParser.TestParser(os.path.join(testRoot, 'CONFIG')).parse()
    if questionToGrade is not None:
        toRun = getDepends(testParser, testRoot, questionToGrade)
        if len(toRun) > 1:
            print('Note: due to dependencies, the following tests will be run: %s' % ' '.join(toRun))
        return toRun
    return config['order'].split() if 'order' in config else sorted(os.listdir(testRoot))
|
||||
|
||||
|
||||
# evaluate student code
def evaluate(generateSolutions, testRoot, moduleDict, exceptionMap=ERROR_HINT_MAP,
             edxOutput=False, muteOutput=False, gsOutput=False,
             printTestCase=False, questionToGrade=None, display=None):
    """Grade the student code against every selected question's test cases.

    generateSolutions: if True, write .solution files instead of grading.
    testRoot: directory whose subdirectories each hold one question's tests.
    moduleDict: mapping of module name -> loaded module (student code plus
                'projectTestClasses').
    exceptionMap: question/exception -> hint text (see ERROR_HINT_MAP).
    questionToGrade: grade only this question (plus prerequisites) when set.
    display: display object handed to each Question.

    Returns the Grades points counter (question -> points earned).
    """
    # imports of testbench code.  note that the testClasses import must follow
    # the import of student code due to dependencies
    import testParser
    import testClasses
    # Expose the student modules as globals of this module so test code can
    # reference them by name.
    for module in moduleDict:
        setattr(sys.modules[__name__], module, moduleDict[module])

    questions = []
    questionDicts = {}
    test_subdirs = getTestSubdirs(testParser, testRoot, questionToGrade)
    for q in test_subdirs:
        subdir_path = os.path.join(testRoot, q)
        # skip stray files and hidden directories in the test root
        if not os.path.isdir(subdir_path) or q[0] == '.':
            continue

        # create a question object
        questionDict = testParser.TestParser(os.path.join(subdir_path, 'CONFIG')).parse()
        questionClass = getattr(testClasses, questionDict['class'])
        question = questionClass(questionDict, display)
        questionDicts[q] = questionDict

        # load test cases into question
        # test files are '<name>.test', excluding editor backups/hidden files
        tests = filter(lambda t: re.match('[^#~.].*\.test\Z', t), os.listdir(subdir_path))
        tests = map(lambda t: re.match('(.*)\.test\Z', t).group(1), tests)
        for t in sorted(tests):
            test_file = os.path.join(subdir_path, '%s.test' % t)
            solution_file = os.path.join(subdir_path, '%s.solution' % t)
            test_out_file = os.path.join(subdir_path, '%s.test_output' % t)
            testDict = testParser.TestParser(test_file).parse()
            if testDict.get("disabled", "false").lower() == "true":
                continue
            testDict['test_out_file'] = test_out_file
            # projectTestClasses was injected by the setattr loop above.
            testClass = getattr(projectTestClasses, testDict['class'])
            testCase = testClass(question, testDict)
            # makefun freezes testCase/solution_file per iteration (closure
            # over the loop variable alone would see only the last value).
            def makefun(testCase, solution_file):
                if generateSolutions:
                    # write solution file to disk
                    return lambda grades: testCase.writeSolution(moduleDict, solution_file)
                else:
                    # read in solution dictionary and pass as an argument
                    testDict = testParser.TestParser(test_file).parse()
                    solutionDict = testParser.TestParser(solution_file).parse()
                    if printTestCase:
                        return lambda grades: printTest(testDict, solutionDict) or testCase.execute(grades, moduleDict, solutionDict)
                    else:
                        return lambda grades: testCase.execute(grades, moduleDict, solutionDict)
            question.addTestCase(testCase, makefun(testCase, solution_file))

        # Note extra function is necessary for scoping reasons
        def makefun(question):
            return lambda grades: question.execute(grades)
        # register the question runner under the question's name so
        # Grades.grade can find it via getattr(gradingModule, q)
        setattr(sys.modules[__name__], q, makefun(question))
        questions.append((q, question.getMaxPoints()))

    grades = grading.Grades(projectParams.PROJECT_NAME, questions,
                            gsOutput=gsOutput, edxOutput=edxOutput, muteOutput=muteOutput)
    # prerequisites only apply when grading the full project
    if questionToGrade == None:
        for q in questionDicts:
            for prereq in questionDicts[q].get('depends', '').split():
                grades.addPrereq(q, prereq)

    grades.grade(sys.modules[__name__], bonusPic = projectParams.BONUS_PIC)
    return grades.points
|
||||
|
||||
|
||||
|
||||
def getDisplay(graphicsByDefault, options=None):
    """Return a display object for pacman games.

    graphicsByDefault: whether to prefer a graphical display.
    options: parsed command-line options; options.noGraphics forces text mode.

    Falls back to textDisplay.NullGraphics() when graphics are disabled or
    graphicsDisplay cannot be imported (e.g. no tkinter available).
    """
    graphics = graphicsByDefault
    if options is not None and options.noGraphics:
        graphics = False
    if graphics:
        try:
            import graphicsDisplay
            return graphicsDisplay.PacmanGraphics(1, frameTime=.05)
        except ImportError:
            # no graphics backend available; fall through to the null display
            pass
    import textDisplay
    return textDisplay.NullGraphics()
|
||||
|
||||
def check_dependencies():
    """Smoke-test optional third-party dependencies.

    Imports tkinter/numpy/matplotlib and briefly animates a rotating line,
    so a missing or broken install fails loudly with an ImportError or a
    drawing error instead of surfacing mid-project.
    """
    # display across projects
    import tkinter

    # machine learning project
    import numpy as np
    import matplotlib
    import contextlib
    import matplotlib.pyplot as plt
    import time
    fig, ax = plt.subplots(1, 1)
    ax.set_xlim([-1, 1])
    ax.set_ylim([-1, 1])
    line, = ax.plot([], [], color="black")
    plt.show(block=False)

    # spin a line through 400 small steps to confirm interactive redrawing works
    for t in range(400):
        angle = t * 0.05
        x = np.sin(angle)
        y = np.cos(angle)
        line.set_data([x,-x], [y,-y])
        fig.canvas.draw_idle()
        fig.canvas.start_event_loop(1e-3)

    # TODO: add dependencies for logic project for Spring, etc.
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Entry point: parse arguments, load student code, then either run a
    # single test or grade the whole project.
    options = readCommand(sys.argv)
    if options.check_dependencies:
        check_dependencies()
        exit()

    if options.generateSolutions:
        # ask before clobbering existing .solution files
        confirmGenerate()
    codePaths = options.studentCode.split(',')
    # moduleCodeDict = {}
    # for cp in codePaths:
    #     moduleName = re.match('.*?([^/]*)\.py', cp).group(1)
    #     moduleCodeDict[moduleName] = readFile(cp, root=options.codeRoot)
    # moduleCodeDict['projectTestClasses'] = readFile(options.testCaseCode, root=options.codeRoot)
    # moduleDict = loadModuleDict(moduleCodeDict)

    # Load each student file plus the project's test classes into moduleDict,
    # keyed by bare module name (path and .py suffix stripped).
    moduleDict = {}
    for cp in codePaths:
        moduleName = re.match('.*?([^/]*)\.py', cp).group(1)
        moduleDict[moduleName] = loadModuleFile(moduleName, os.path.join(options.codeRoot, cp))
    moduleName = re.match('.*?([^/]*)\.py', options.testCaseCode).group(1)
    moduleDict['projectTestClasses'] = loadModuleFile(moduleName, os.path.join(options.codeRoot, options.testCaseCode))

    if options.runTest != None:
        # single-test mode: graphics on by default
        runTest(options.runTest, moduleDict, printTestCase=options.printTestCase, display=getDisplay(True, options))
    else:
        # full grading mode: graphics only when grading one question
        evaluate(options.generateSolutions, options.testRoot, moduleDict,
                 gsOutput=options.gsOutput,
                 edxOutput=options.edxOutput, muteOutput=options.muteOutput, printTestCase=options.printTestCase,
                 questionToGrade=options.gradeQuestion, display=getDisplay(options.gradeQuestion!=None, options))
|
46
tutorial/buyLotsOfFruit.py
Normal file
46
tutorial/buyLotsOfFruit.py
Normal file
@ -0,0 +1,46 @@
|
||||
# buyLotsOfFruit.py
|
||||
# -----------------
|
||||
# Licensing Information: You are free to use or extend these projects for
|
||||
# educational purposes provided that (1) you do not distribute or publish
|
||||
# solutions, (2) you retain this notice, and (3) you provide clear
|
||||
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
|
||||
#
|
||||
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
|
||||
# The core projects and autograders were primarily created by John DeNero
|
||||
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
|
||||
# Student side autograding was added by Brad Miller, Nick Hay, and
|
||||
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
|
||||
|
||||
|
||||
"""
|
||||
To run this script, type
|
||||
|
||||
python buyLotsOfFruit.py
|
||||
|
||||
Once you have correctly implemented the buyLotsOfFruit function,
|
||||
the script should produce the output:
|
||||
|
||||
Cost of [('apples', 2.0), ('pears', 3.0), ('limes', 4.0)] is 12.25
|
||||
"""
|
||||
from __future__ import print_function
|
||||
|
||||
# Per-pound price list used by buyLotsOfFruit.
fruitPrices = {'apples': 2.00, 'oranges': 1.50, 'pears': 1.75,
               'limes': 0.75, 'strawberries': 1.00}


def buyLotsOfFruit(orderList):
    """
    orderList: List of (fruit, numPounds) tuples

    Returns cost of order, or None (after printing an error) if the order
    contains a fruit that is not in fruitPrices.
    """
    # The stub always returned 0.0; the module docstring promises 12.25 for
    # the sample order, so compute the real total here.
    totalCost = 0.0
    for fruit, numPounds in orderList:
        if fruit not in fruitPrices:
            # No defensible price for an unknown fruit: flag it and bail out.
            print("Error: %s not found in fruitPrices" % fruit)
            return None
        totalCost += fruitPrices[fruit] * numPounds
    return totalCost
|
||||
|
||||
|
||||
# Main Method
if __name__ == '__main__':
    "This code runs when you invoke the script from the command line"
    # Sample order from the module docstring; expected cost is 12.25.
    orderList = [('apples', 2.0), ('pears', 3.0), ('limes', 4.0)]
    print('Cost of', orderList, 'is', buyLotsOfFruit(orderList))
|
322
tutorial/grading.py
Normal file
322
tutorial/grading.py
Normal file
@ -0,0 +1,322 @@
|
||||
# grading.py
|
||||
# ----------
|
||||
# Licensing Information: You are free to use or extend these projects for
|
||||
# educational purposes provided that (1) you do not distribute or publish
|
||||
# solutions, (2) you retain this notice, and (3) you provide clear
|
||||
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
|
||||
#
|
||||
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
|
||||
# The core projects and autograders were primarily created by John DeNero
|
||||
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
|
||||
# Student side autograding was added by Brad Miller, Nick Hay, and
|
||||
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
|
||||
|
||||
|
||||
"Common code for autograders"
|
||||
|
||||
from __future__ import print_function
|
||||
import html
|
||||
import time
|
||||
import sys
|
||||
import json
|
||||
import traceback
|
||||
import pdb
|
||||
from collections import defaultdict
|
||||
import util
|
||||
|
||||
|
||||
class Grades:
    "A data structure for project grades, along with formatting code to display them"

    def __init__(self, projectName, questionsAndMaxesList,
                 gsOutput=False, edxOutput=False, muteOutput=False):
        """
        Defines the grading scheme for a project
        projectName: project name
        questionsAndMaxesDict: a list of (question name, max points per question)
        """
        self.questions = [el[0] for el in questionsAndMaxesList]
        self.maxes = dict(questionsAndMaxesList)
        self.points = Counter()  # question -> points earned; missing keys read as 0
        self.messages = dict([(q, []) for q in self.questions])
        self.project = projectName
        self.start = time.localtime()[1:6]  # (month, day, hour, minute, second)
        self.sane = True  # Sanity checks
        self.currentQuestion = None  # Which question we're grading
        self.edxOutput = edxOutput
        self.gsOutput = gsOutput  # GradeScope output
        self.mute = muteOutput
        self.prereqs = defaultdict(set)  # question -> prerequisite question names

        # print 'Autograder transcript for %s' % self.project
        print('Starting on %d-%d at %d:%02d:%02d' % self.start)

    def addPrereq(self, question, prereq):
        # Record that `prereq` must be completed before `question` is graded.
        self.prereqs[question].add(prereq)

    def grade(self, gradingModule, exceptionMap={}, bonusPic=False):
        """
        Grades each question
        gradingModule: the module with all the grading functions (pass in with sys.modules[__name__])
        """

        completedQuestions = set([])
        for q in self.questions:
            print('\nQuestion %s' % q)
            print('=' * (9 + len(q)))
            print()
            self.currentQuestion = q

            # Skip (with a note) any question whose prerequisites are not
            # yet fully solved.
            incompleted = self.prereqs[q].difference(completedQuestions)
            if len(incompleted) > 0:
                prereq = incompleted.pop()
                print(
"""*** NOTE: Make sure to complete Question %s before working on Question %s,
*** because Question %s builds upon your answer for Question %s.
""" % (prereq, q, q, prereq))
                continue

            if self.mute: util.mutePrint()
            try:
                # 30-minute timeout per question.
                util.TimeoutFunction(getattr(gradingModule, q), 1800)(self)  # Call the question's function
                # TimeoutFunction(getattr(gradingModule, q),1200)(self) # Call the question's function
            except Exception as inst:  # originally, Exception, inst
                self.addExceptionMessage(q, inst, traceback)
                # q is e.g. 'q1', so q[1] is the question number string.
                self.addErrorHints(exceptionMap, inst, q[1])
            except:
                self.fail('FAIL: Terminated with a string exception.')
            finally:
                if self.mute: util.unmutePrint()

            # A question only counts as completed when it earned full marks.
            if self.points[q] >= self.maxes[q]:
                completedQuestions.add(q)

            print('\n### Question %s: %d/%d ###\n' % (q, self.points[q], self.maxes[q]))

        print('\nFinished at %d:%02d:%02d' % time.localtime()[3:6])
        print("\nProvisional grades\n==================")

        for q in self.questions:
            print('Question %s: %d/%d' % (q, self.points[q], self.maxes[q]))
        print('------------------')
        print('Total: %d/%d' % (self.points.totalCount(), sum(self.maxes.values())))
        # Easter egg: perfect 25-point score prints GRANDPAC when enabled.
        if bonusPic and self.points.totalCount() == 25:
            print("""

                     ALL HAIL GRANDPAC.
              LONG LIVE THE GHOSTBUSTING KING.

                  ---      ----      ---
                  |  \    /  + \    /  |
                  | + \--/      \--/ + |
                  |   +     +          |
                  | +     +        +   |
                @@@@@@@@@@@@@@@@@@@@@@@@@@
              @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
            @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
           @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
            \   @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
             \ /  @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
              V   \   @@@@@@@@@@@@@@@@@@@@@@@@@@@@
                   \ /  @@@@@@@@@@@@@@@@@@@@@@@@@@
                    V     @@@@@@@@@@@@@@@@@@@@@@@@
                            @@@@@@@@@@@@@@@@@@@@@@
             /\      @@@@@@@@@@@@@@@@@@@@@@
            /  \  @@@@@@@@@@@@@@@@@@@@@@@@@
           /\  /  @@@@@@@@@@@@@@@@@@@@@@@@@@@
          /  \ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
          /    @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
            @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
              @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
                @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
                  @@@@@@@@@@@@@@@@@@@@@@@@@@
                      @@@@@@@@@@@@@@@@@@

""")
        print("""
Your grades are NOT yet registered. To register your grades, make sure
to follow your instructor's guidelines to receive credit on your project.
""")

        if self.edxOutput:
            self.produceOutput()
        if self.gsOutput:
            self.produceGradeScopeOutput()

    def addExceptionMessage(self, q, inst, traceback):
        """
        Method to format the exception message, this is more complicated because
        we need to html.escape the traceback but wrap the exception in a <pre> tag
        """
        self.fail('FAIL: Exception raised: %s' % inst)
        self.addMessage('')
        for line in traceback.format_exc().split('\n'):
            self.addMessage(line)

    def addErrorHints(self, exceptionMap, errorInstance, questionNum):
        # Append any configured hint text for this question/exception pair
        # to the current question's messages.
        typeOf = str(type(errorInstance))
        questionName = 'q' + questionNum
        errorHint = ''

        # question specific error hints
        if exceptionMap.get(questionName):
            questionMap = exceptionMap.get(questionName)
            if (questionMap.get(typeOf)):
                errorHint = questionMap.get(typeOf)
        # fall back to general error messages if a question specific
        # one does not exist
        if (exceptionMap.get(typeOf)):
            errorHint = exceptionMap.get(typeOf)

        # dont include the HTML if we have no error hint
        if not errorHint:
            return ''

        for line in errorHint.split('\n'):
            self.addMessage(line)

    def produceGradeScopeOutput(self):
        """Write a GradeScope-format JSON summary to gradescope_response.json."""
        out_dct = {}

        # total of entire submission
        total_possible = sum(self.maxes.values())
        total_score = sum(self.points.values())
        out_dct['score'] = total_score
        out_dct['max_score'] = total_possible
        out_dct['output'] = "Total score (%d / %d)" % (total_score, total_possible)

        # individual tests
        tests_out = []
        for name in self.questions:
            test_out = {}
            # test name
            test_out['name'] = name
            # test score
            test_out['score'] = self.points[name]
            test_out['max_score'] = self.maxes[name]
            # others
            is_correct = self.points[name] >= self.maxes[name]
            test_out['output'] = "  Question {num} ({points}/{max}) {correct}".format(
                num=(name[1] if len(name) == 2 else name),
                points=test_out['score'],
                max=test_out['max_score'],
                correct=('X' if not is_correct else ''),
            )
            test_out['tags'] = []
            tests_out.append(test_out)
        out_dct['tests'] = tests_out

        # file output
        with open('gradescope_response.json', 'w') as outfile:
            json.dump(out_dct, outfile)
        return

    def produceOutput(self):
        """Write an edX-format HTML report (edx_response.html) and a plain
        total-score file (edx_grade)."""
        edxOutput = open('edx_response.html', 'w')
        edxOutput.write("<div>")

        # first sum
        total_possible = sum(self.maxes.values())
        total_score = sum(self.points.values())
        checkOrX = '<span class="incorrect"/>'
        if (total_score >= total_possible):
            checkOrX = '<span class="correct"/>'
        header = """
        <h3>
            Total score ({total_score} / {total_possible})
        </h3>
    """.format(total_score=total_score,
               total_possible=total_possible,
               checkOrX=checkOrX
               )
        edxOutput.write(header)

        for q in self.questions:
            if len(q) == 2:
                name = q[1]
            else:
                name = q
            checkOrX = '<span class="incorrect"/>'
            if (self.points[q] >= self.maxes[q]):
                checkOrX = '<span class="correct"/>'
            # messages = '\n<br/>\n'.join(self.messages[q])
            messages = "<pre>%s</pre>" % '\n'.join(self.messages[q])
            output = """
        <div class="test">
          <section>
          <div class="shortform">
            Question {q} ({points}/{max}) {checkOrX}
          </div>
        <div class="longform">
          {messages}
        </div>
        </section>
      </div>
      """.format(q=name,
                 max=self.maxes[q],
                 messages=messages,
                 checkOrX=checkOrX,
                 points=self.points[q]
                 )
            # print "*** output for Question %s " % q[1]
            # print output
            edxOutput.write(output)
        edxOutput.write("</div>")
        edxOutput.close()
        edxOutput = open('edx_grade', 'w')
        edxOutput.write(str(self.points.totalCount()))
        edxOutput.close()

    def fail(self, message, raw=False):
        "Sets sanity check bit to false and outputs a message"
        self.sane = False
        self.assignZeroCredit()
        self.addMessage(message, raw)

    def assignZeroCredit(self):
        # Zero out the current question's score.
        self.points[self.currentQuestion] = 0

    def addPoints(self, amt):
        # Add `amt` points to the current question.
        self.points[self.currentQuestion] += amt

    def deductPoints(self, amt):
        # Remove `amt` points from the current question.
        self.points[self.currentQuestion] -= amt

    def assignFullCredit(self, message="", raw=False):
        # Set the current question's score to its maximum.
        self.points[self.currentQuestion] = self.maxes[self.currentQuestion]
        if message != "":
            self.addMessage(message, raw)

    def addMessage(self, message, raw=False):
        if not raw:
            # We assume raw messages, formatted for HTML, are printed separately
            if self.mute: util.unmutePrint()
            print('*** ' + message)
            if self.mute: util.mutePrint()
            message = html.escape(message)
        self.messages[self.currentQuestion].append(message)

    def addMessageToEmail(self, message):
        # Deprecated: kept only so old callers don't crash.
        print("WARNING**** addMessageToEmail is deprecated %s" % message)
        for line in message.split('\n'):
            pass
            # print '%%% ' + line + ' %%%'
            # self.messages[self.currentQuestion].append(line)
|
||||
|
||||
|
||||
class Counter(dict):
    """
    Dict with default 0
    """

    def __getitem__(self, idx):
        # Missing keys behave as if present with a count of zero.
        if idx in self:
            return dict.__getitem__(self, idx)
        return 0

    def totalCount(self):
        """
        Returns the sum of counts for all keys.
        """
        return sum(self.values())
|
18
tutorial/projectParams.py
Normal file
18
tutorial/projectParams.py
Normal file
@ -0,0 +1,18 @@
|
||||
# projectParams.py
|
||||
# ----------------
|
||||
# Licensing Information: You are free to use or extend these projects for
|
||||
# educational purposes provided that (1) you do not distribute or publish
|
||||
# solutions, (2) you retain this notice, and (3) you provide clear
|
||||
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
|
||||
#
|
||||
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
|
||||
# The core projects and autograders were primarily created by John DeNero
|
||||
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
|
||||
# Student side autograding was added by Brad Miller, Nick Hay, and
|
||||
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
|
||||
|
||||
|
||||
# Comma-separated list of student code files the autograder loads by default
# (autograder.py --student-code).
STUDENT_CODE_DEFAULT = 'addition.py,buyLotsOfFruit.py,shopSmart.py,shopAroundTown.py'
# File containing this project's test-case classes (autograder.py --test-case-code).
PROJECT_TEST_CLASSES = 'tutorialTestClasses.py'
# Human-readable project name used in grading output.
PROJECT_NAME = 'Project 0: Tutorial'
# Whether a perfect score prints the bonus picture (see grading.Grades.grade).
BONUS_PIC = False
|
60
tutorial/shop.py
Normal file
60
tutorial/shop.py
Normal file
@ -0,0 +1,60 @@
|
||||
# shop.py
|
||||
# -------
|
||||
# Licensing Information: You are free to use or extend these projects for
|
||||
# educational purposes provided that (1) you do not distribute or publish
|
||||
# solutions, (2) you retain this notice, and (3) you provide clear
|
||||
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
|
||||
#
|
||||
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
|
||||
# The core projects and autograders were primarily created by John DeNero
|
||||
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
|
||||
# Student side autograding was added by Brad Miller, Nick Hay, and
|
||||
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
|
||||
|
||||
|
||||
class FruitShop:
    """A named shop with a fixed per-pound price list."""

    def __init__(self, name, fruitPrices):
        """
        name: Name of the fruit shop

        fruitPrices: Dictionary with keys as fruit
        strings and prices for values e.g.
        {'apples':2.00, 'oranges': 1.50, 'pears': 1.75}
        """
        self.fruitPrices = fruitPrices
        self.name = name
        print('Welcome to %s fruit shop' % (name))

    def getCostPerPound(self, fruit):
        """
        fruit: Fruit string
        Returns cost of 'fruit', assuming 'fruit'
        is in our inventory or None otherwise
        """
        # dict.get returns None for fruits we don't stock.
        return self.fruitPrices.get(fruit)

    def getPriceOfOrder(self, orderList):
        """
        orderList: List of (fruit, numPounds) tuples

        Returns cost of orderList, only including the values of
        fruits that this fruit shop has.
        """
        total = 0.0
        for fruit, pounds in orderList:
            perPound = self.getCostPerPound(fruit)
            if perPound is not None:
                total += pounds * perPound
        return total

    def getName(self):
        return self.name

    def __str__(self):
        return "<FruitShop: %s>" % self.getName()

    def __repr__(self):
        return str(self)
|
114
tutorial/shopAroundTown.py
Normal file
114
tutorial/shopAroundTown.py
Normal file
@ -0,0 +1,114 @@
|
||||
# shopAroundTown.py
|
||||
# -----------------
|
||||
# Licensing Information: You are free to use or extend these projects for
|
||||
# educational purposes provided that (1) you do not distribute or publish
|
||||
# solutions, (2) you retain this notice, and (3) you provide clear
|
||||
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
|
||||
#
|
||||
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
|
||||
# The core projects and autograders were primarily created by John DeNero
|
||||
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
|
||||
# Student side autograding was added by Brad Miller, Nick Hay, and
|
||||
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
|
||||
|
||||
|
||||
"""
|
||||
Here's the intended output of this script, once you fill it in:
|
||||
|
||||
Welcome to shop1 fruit shop
|
||||
Welcome to shop2 fruit shop
|
||||
Welcome to shop3 fruit shop
|
||||
Orders: [('apples', 1.0), ('oranges', 3.0), ('limes', 2.0)]
|
||||
At gas price 1 the best route is: ['shop1', 'shop2', 'shop3']
|
||||
At gas price 3 the best route is: ['shop1', 'shop3']
|
||||
At gas price 5 the best route is: ['shop2']
|
||||
At gas price -1 the best route is: ['shop2', 'shop1', 'shop3']
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
import shop
|
||||
import town
|
||||
|
||||
|
||||
def shopAroundTown(orderList, fruitTown, gasCost):
    """
    orderList: List of (fruit, numPound) tuples
    fruitTown: A Town object
    gasCost: A number representing the cost of going one mile
    Returns a list of shops in the order that is the optimal route to take when
    buying the fruit in the orderList
    """
    # Collect every ordering of every subset of shops able to fill the order.
    candidateRoutes = []
    for shopSubset in getAllSubsets(fruitTown.getShops()):
        shopNames = [s.getName() for s in shopSubset]
        if fruitTown.allFruitsCarriedAtShops(orderList, shopNames):
            candidateRoutes.extend(getAllPermutations(shopSubset))

    # Keep the route with the lowest combined fruit + gas cost.
    bestRoute, minCost = None, None
    for candidate in candidateRoutes:
        routeCost = fruitTown.getPriceOfOrderOnRoute(orderList, candidate, gasCost)
        if minCost is None or routeCost < minCost:
            bestRoute, minCost = candidate, routeCost
    return bestRoute
|
||||
|
||||
|
||||
def getAllSubsets(lst):
    """
    lst: A list
    Returns the powerset of lst, i.e. a list of all the possible subsets of lst

    Bug fix: the empty-list base case must be [[]] (a list containing the
    empty subset). The previous `return []` propagated up the recursion
    (withFirst and withoutFirst were both built from it), so the function
    returned [] for *every* input and no route could ever be considered.
    """
    if not lst:
        # The only subset of the empty list is the empty subset.
        return [[]]
    # Every subset either contains lst[0] or it doesn't.
    withFirst = [[lst[0]] + rest for rest in getAllSubsets(lst[1:])]
    withoutFirst = getAllSubsets(lst[1:])
    return withFirst + withoutFirst
|
||||
|
||||
|
||||
def getAllPermutations(lst):
    """
    lst: A list
    Returns a list of all permutations of lst

    Bug fixes:
    - The recursive step used `lst[:i] + lst[i:]`, which is just `lst` —
      the chosen element was never removed, so the recursion never shrank
      the list and any input of length >= 2 recursed forever.
      The correct slice drops element i: `lst[:i] + lst[i + 1:]`.
    - The single-element base case returned `lst` instead of `[lst]`;
      callers expect a list *of* permutations, each itself a list.
    """
    if not lst:
        return []
    elif len(lst) == 1:
        # Exactly one permutation: the list itself, wrapped in an outer list.
        return [lst]
    allPermutations = []
    for i in range(len(lst)):
        item = lst[i]
        # Remove element i entirely before recursing.
        withoutItem = lst[:i] + lst[i + 1:]
        # Prepend the removed item to each permutation of the remainder.
        allPermutations += [[item] + perm for perm in getAllPermutations(withoutItem)]
    return allPermutations
|
||||
|
||||
|
||||
def prependToAll(item, lsts):
    """
    item: Any object
    lsts: A list of lists
    Returns a copy of lsts with item prepended to each list contained in lsts
    """
    result = []
    for inner in lsts:
        # Build a new list each time; the originals are left untouched.
        result.append([item] + inner)
    return result
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # This code runs when you invoke the script from the command line.
    orders = [('apples', 1.0), ('oranges', 3.0), ('limes', 2.0)]
    # Per-shop price lists; shops are named shop1..shop3 in this order.
    priceLists = [
        {'apples': 2.0, 'oranges': 1.0},
        {'apples': 1.0, 'oranges': 5.0, 'limes': 3.0},
        {'apples': 2.0, 'limes': 2.0},
    ]
    shops = [shop.FruitShop('shop%d' % (i + 1), prices)
             for i, prices in enumerate(priceLists)]
    # Undirected distances in miles between every pair of places.
    distances = {('home', 'shop1'): 2,
                 ('home', 'shop2'): 1,
                 ('home', 'shop3'): 1,
                 ('shop1', 'shop2'): 2.5,
                 ('shop1', 'shop3'): 2.5,
                 ('shop2', 'shop3'): 1}
    fruitTown = town.Town(shops, distances)
    print("Orders:", orders)
    for price in (1, 3, 5, -1):
        print("At gas price", price, "the best route is:",
              shopAroundTown(orders, fruitTown, price))
|
46
tutorial/shopSmart.py
Normal file
46
tutorial/shopSmart.py
Normal file
@ -0,0 +1,46 @@
|
||||
# shopSmart.py
|
||||
# ------------
|
||||
# Licensing Information: You are free to use or extend these projects for
|
||||
# educational purposes provided that (1) you do not distribute or publish
|
||||
# solutions, (2) you retain this notice, and (3) you provide clear
|
||||
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
|
||||
#
|
||||
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
|
||||
# The core projects and autograders were primarily created by John DeNero
|
||||
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
|
||||
# Student side autograding was added by Brad Miller, Nick Hay, and
|
||||
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
|
||||
|
||||
|
||||
"""
|
||||
Here's the intended output of this script, once you fill it in:
|
||||
|
||||
Welcome to shop1 fruit shop
|
||||
Welcome to shop2 fruit shop
|
||||
For orders: [('apples', 1.0), ('oranges', 3.0)] best shop is shop1
|
||||
For orders: [('apples', 3.0)] best shop is shop2
|
||||
"""
|
||||
from __future__ import print_function
|
||||
import shop
|
||||
|
||||
|
||||
def shopSmart(orderList, fruitShops):
    """
    orderList: List of (fruit, numPound) tuples
    fruitShops: List of FruitShops

    Should return the FruitShop where the order is cheapest; this is a
    student-implemented stub that currently returns None.
    """
    "*** YOUR CODE HERE ***"
    return None
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # This code runs when you invoke the script from the command line.
    orders = [('apples', 1.0), ('oranges', 3.0)]
    dir1 = {'apples': 2.0, 'oranges': 1.0}
    shop1 = shop.FruitShop('shop1', dir1)
    dir2 = {'apples': 1.0, 'oranges': 5.0}
    shop2 = shop.FruitShop('shop2', dir2)
    shops = [shop1, shop2]
    # Fix: the first report line was missing the colon ("For orders "),
    # making the two output lines inconsistent with each other and with
    # the intended output documented at the top of this module.
    print("For orders: ", orders, ", the best shop is", shopSmart(orders, shops).getName())
    orders = [('apples', 3.0)]
    print("For orders: ", orders, ", the best shop is", shopSmart(orders, shops).getName())
|
30
tutorial/submission_autograder.py
Normal file
30
tutorial/submission_autograder.py
Normal file
File diff suppressed because one or more lines are too long
207
tutorial/testClasses.py
Normal file
207
tutorial/testClasses.py
Normal file
@ -0,0 +1,207 @@
|
||||
# testClasses.py
|
||||
# --------------
|
||||
# Licensing Information: You are free to use or extend these projects for
|
||||
# educational purposes provided that (1) you do not distribute or publish
|
||||
# solutions, (2) you retain this notice, and (3) you provide clear
|
||||
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
|
||||
#
|
||||
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
|
||||
# The core projects and autograders were primarily created by John DeNero
|
||||
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
|
||||
# Student side autograding was added by Brad Miller, Nick Hay, and
|
||||
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
|
||||
|
||||
|
||||
# import modules from python standard library
|
||||
from __future__ import print_function
|
||||
import inspect
|
||||
import re
|
||||
import sys
|
||||
|
||||
|
||||
# Class which models a question in a project. Note that questions have a
|
||||
# maximum number of points they are worth, and are composed of a series of
|
||||
# test cases
|
||||
class Question(object):
    """One project question: worth a maximum number of points and composed
    of a series of test cases."""

    def __init__(self, questionDict, display):
        self.maxPoints = int(questionDict['max_points'])
        self.testCases = []
        self.display = display

    def raiseNotDefined(self):
        # Report the name of the caller that should have been overridden.
        print('Method not implemented: %s' % inspect.stack()[1][3])
        sys.exit(1)

    def getDisplay(self):
        return self.display

    def getMaxPoints(self):
        return self.maxPoints

    def addTestCase(self, testCase, thunk):
        # 'thunk' must be a function which accepts a single argument,
        # namely a 'grading' object.
        self.testCases.append((testCase, thunk))

    def execute(self, grades):
        # Grading policy is defined by subclasses.
        self.raiseNotDefined()
|
||||
|
||||
|
||||
# Question in which all test cases must be passed in order to receive credit
|
||||
class PassAllTestsQuestion(Question):
    """Question in which all test cases must be passed in order to receive credit."""

    def execute(self, grades):
        # TODO: is this the right way to use grades? The autograder doesn't seem to use it.
        grades.assignZeroCredit()
        anyFailed = False
        for _, thunk in self.testCases:
            if not thunk(grades):
                anyFailed = True
        if anyFailed:
            grades.fail("Tests failed.")
        else:
            grades.assignFullCredit()
|
||||
|
||||
|
||||
class ExtraCreditPassAllTestsQuestion(Question):
    """Pass-all-tests question that additionally awards bonus points when
    every test case passes."""

    def __init__(self, questionDict, display):
        Question.__init__(self, questionDict, display)
        self.extraPoints = int(questionDict['extra_points'])

    def execute(self, grades):
        # TODO: is this the right way to use grades? The autograder doesn't seem to use it.
        grades.assignZeroCredit()
        anyFailed = False
        for _, thunk in self.testCases:
            if not thunk(grades):
                anyFailed = True
        if anyFailed:
            grades.fail("Tests failed.")
        else:
            grades.assignFullCredit()
            grades.addPoints(self.extraPoints)
|
||||
|
||||
|
||||
# Question in which predict credit is given for test cases with a ``points'' property.
|
||||
# All other tests are mandatory and must be passed.
|
||||
class HackedPartialCreditQuestion(Question):
    """Question in which partial credit is given for test cases with a
    ``points'' property; all other tests are mandatory and must be passed."""

    def execute(self, grades):
        # TODO: is this the right way to use grades? The autograder doesn't seem to use it.
        grades.assignZeroCredit()

        points = 0
        passed = True
        for testCase, thunk in self.testCases:
            outcome = thunk(grades)
            if "points" in testCase.testDict:
                # Optional test: its points count only when it passes.
                if outcome:
                    points += float(testCase.testDict["points"])
            else:
                # Mandatory test: a single failure marks the question failed.
                passed = passed and outcome

        ## FIXME: Below terrible hack to match q3's logic
        if int(points) == self.maxPoints and not passed:
            grades.assignZeroCredit()
        else:
            grades.addPoints(int(points))
|
||||
|
||||
|
||||
class Q6PartialCreditQuestion(Question):
    """Fails any test which returns False, otherwise doesn't effect the grades object.
    Partial credit tests will add the required points."""

    def execute(self, grades):
        grades.assignZeroCredit()
        # Run every test case; the thunks themselves add any partial credit.
        outcomes = [thunk(grades) for _, thunk in self.testCases]
        if False in outcomes:
            # Any hard failure zeroes the question again.
            grades.assignZeroCredit()
|
||||
|
||||
|
||||
class PartialCreditQuestion(Question):
    """Fails any test which returns False, otherwise doesn't effect the grades object.
    Partial credit tests will add the required points."""

    def execute(self, grades):
        grades.assignZeroCredit()
        for _, thunk in self.testCases:
            if not thunk(grades):
                # First failure zeroes the question and stops grading.
                grades.assignZeroCredit()
                grades.fail("Tests failed.")
                return False
|
||||
|
||||
|
||||
class NumberPassedQuestion(Question):
    """Grade is the number of test cases passed."""

    def execute(self, grades):
        outcomes = [thunk(grades) for _, thunk in self.testCases]
        # count(True) deliberately counts only boolean-equal-to-True results.
        grades.addPoints(outcomes.count(True))
|
||||
|
||||
|
||||
# Template modeling a generic test case
|
||||
class TestCase(object):
    """Template modeling a generic test case; concrete tests override
    __str__, execute, and writeSolution."""

    def __init__(self, question, testDict):
        self.question = question
        self.testDict = testDict
        self.path = testDict['path']
        self.messages = []

    def getPath(self):
        return self.path

    def raiseNotDefined(self):
        # Report the name of the caller that should have been overridden.
        print('Method not implemented: %s' % inspect.stack()[1][3])
        sys.exit(1)

    def __str__(self):
        self.raiseNotDefined()

    def execute(self, grades, moduleDict, solutionDict):
        self.raiseNotDefined()

    def writeSolution(self, moduleDict, filePath):
        self.raiseNotDefined()
        return True

    # Tests should call the following messages for grading
    # to ensure a uniform format for test output.
    #
    # TODO: this is hairy, but we need to fix grading.py's interface
    # to get a nice hierarchical project - question - test structure,
    # then these should be moved into Question proper.
    def testPass(self, grades):
        grades.addMessage('PASS: %s' % (self.path,))
        for message in self.messages:
            grades.addMessage('    %s' % (message,))
        return True

    def testFail(self, grades):
        grades.addMessage('FAIL: %s' % (self.path,))
        for message in self.messages:
            grades.addMessage('    %s' % (message,))
        return False

    # This should really be question level?
    def testPartial(self, grades, points, maxPoints):
        grades.addPoints(points)
        # Anything above maxPoints is reported as extra credit.
        extraCredit = max(0, points - maxPoints)
        regularCredit = points - extraCredit

        grades.addMessage('%s: %s (%s of %s points)' % (
            "PASS" if points >= maxPoints else "FAIL", self.path, regularCredit, maxPoints))
        if extraCredit > 0:
            grades.addMessage('EXTRA CREDIT: %s points' % (extraCredit,))

        for message in self.messages:
            grades.addMessage('    %s' % (message,))

        return True

    def addMessage(self, message):
        # Multi-line messages become one stored entry per line.
        self.messages.extend(message.split('\n'))
|
86
tutorial/testParser.py
Normal file
86
tutorial/testParser.py
Normal file
@ -0,0 +1,86 @@
|
||||
# testParser.py
|
||||
# -------------
|
||||
# Licensing Information: You are free to use or extend these projects for
|
||||
# educational purposes provided that (1) you do not distribute or publish
|
||||
# solutions, (2) you retain this notice, and (3) you provide clear
|
||||
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
|
||||
#
|
||||
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
|
||||
# The core projects and autograders were primarily created by John DeNero
|
||||
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
|
||||
# Student side autograding was added by Brad Miller, Nick Hay, and
|
||||
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
|
||||
|
||||
from __future__ import print_function
|
||||
import re
|
||||
import sys
|
||||
|
||||
|
||||
class TestParser(object):
    """Parses a .test file into a dict of properties.

    Fix: the regular-expression patterns were plain strings containing
    `\\A`, `\\s`, and `\\Z`, which are invalid string escape sequences —
    modern CPython emits SyntaxWarning/DeprecationWarning for them and
    they are slated to become errors. All patterns are now raw strings
    (identical bytes at runtime, so parsing behavior is unchanged).
    """

    def __init__(self, path):
        # save the path to the test file
        self.path = path

    def removeComments(self, rawlines):
        # remove any portion of a line following a '#' symbol
        fixed_lines = []
        for l in rawlines:
            idx = l.find('#')
            if idx == -1:
                fixed_lines.append(l)
            else:
                fixed_lines.append(l[0:idx])
        return '\n'.join(fixed_lines)

    def parse(self):
        # read in the test case and remove comments
        test = {}
        with open(self.path) as handle:
            raw_lines = handle.read().split('\n')

        test_text = self.removeComments(raw_lines)
        test['__raw_lines__'] = raw_lines
        test['path'] = self.path
        # __emit__ records layout so emitTestDict can round-trip the file.
        test['__emit__'] = []
        lines = test_text.split('\n')
        i = 0
        # read a property in each loop cycle
        while i < len(lines):
            # skip blank lines
            if re.match(r'\A\s*\Z', lines[i]):
                test['__emit__'].append(("raw", raw_lines[i]))
                i += 1
                continue
            # one-line property: key: "value"
            m = re.match(r'\A([^"]*?):\s*"([^"]*)"\s*\Z', lines[i])
            if m:
                test[m.group(1)] = m.group(2)
                test['__emit__'].append(("oneline", m.group(1)))
                i += 1
                continue
            # multi-line property: key: """ ... """
            m = re.match(r'\A([^"]*?):\s*"""\s*\Z', lines[i])
            if m:
                msg = []
                i += 1
                while not re.match(r'\A\s*"""\s*\Z', lines[i]):
                    msg.append(raw_lines[i])
                    i += 1
                test[m.group(1)] = '\n'.join(msg)
                test['__emit__'].append(("multiline", m.group(1)))
                i += 1
                continue
            print('error parsing test file: %s' % self.path)
            sys.exit(1)
        return test
|
||||
|
||||
|
||||
def emitTestDict(testDict, handle):
    """Serialize a parsed test dict back to a file handle, replaying the
    layout recorded in its '__emit__' entries."""
    for kind, data in testDict['__emit__']:
        if kind == "raw":
            line = data + "\n"
        elif kind == "oneline":
            line = '%s: "%s"\n' % (data, testDict[data])
        elif kind == "multiline":
            line = '%s: """\n%s\n"""\n' % (data, testDict[data])
        else:
            raise Exception("Bad __emit__")
        handle.write(line)
|
1
tutorial/test_cases/CONFIG
Normal file
1
tutorial/test_cases/CONFIG
Normal file
@ -0,0 +1 @@
|
||||
order: "q1 q2 q3"
|
2
tutorial/test_cases/q1/CONFIG
Normal file
2
tutorial/test_cases/q1/CONFIG
Normal file
@ -0,0 +1,2 @@
|
||||
max_points: "1"
|
||||
class: "PassAllTestsQuestion"
|
3
tutorial/test_cases/q1/addition1.solution
Normal file
3
tutorial/test_cases/q1/addition1.solution
Normal file
@ -0,0 +1,3 @@
|
||||
# This is the solution file for test_cases/q1/addition1.test.
|
||||
# The result of evaluating the test must equal the below when cast to a string.
|
||||
result: "2"
|
7
tutorial/test_cases/q1/addition1.test
Normal file
7
tutorial/test_cases/q1/addition1.test
Normal file
@ -0,0 +1,7 @@
|
||||
class: "EvalTest"
|
||||
success: "add(a,b) returns the sum of a and b"
|
||||
failure: "add(a,b) must return the sum of a and b"
|
||||
|
||||
# A python expression to be evaluated. This expression must return the
|
||||
# same result for the student and instructor's code.
|
||||
test: "addition.add(1,1)"
|
3
tutorial/test_cases/q1/addition2.solution
Normal file
3
tutorial/test_cases/q1/addition2.solution
Normal file
@ -0,0 +1,3 @@
|
||||
# This is the solution file for test_cases/q1/addition2.test.
|
||||
# The result of evaluating the test must equal the below when cast to a string.
|
||||
result: "5"
|
7
tutorial/test_cases/q1/addition2.test
Normal file
7
tutorial/test_cases/q1/addition2.test
Normal file
@ -0,0 +1,7 @@
|
||||
class: "EvalTest"
|
||||
success: "add(a,b) returns the sum of a and b"
|
||||
failure: "add(a,b) must return the sum of a and b"
|
||||
|
||||
# A python expression to be evaluated. This expression must return the
|
||||
# same result for the student and instructor's code.
|
||||
test: "addition.add(2,3)"
|
3
tutorial/test_cases/q1/addition3.solution
Normal file
3
tutorial/test_cases/q1/addition3.solution
Normal file
@ -0,0 +1,3 @@
|
||||
# This is the solution file for test_cases/q1/addition3.test.
|
||||
# The result of evaluating the test must equal the below when cast to a string.
|
||||
result: "7.9"
|
7
tutorial/test_cases/q1/addition3.test
Normal file
7
tutorial/test_cases/q1/addition3.test
Normal file
@ -0,0 +1,7 @@
|
||||
class: "EvalTest"
|
||||
success: "add(a,b) returns the sum of a and b"
|
||||
failure: "add(a,b) must return the sum of a and b"
|
||||
|
||||
# A python expression to be evaluated. This expression must return the
|
||||
# same result for the student and instructor's code.
|
||||
test: "addition.add(10,-2.1)"
|
2
tutorial/test_cases/q2/CONFIG
Normal file
2
tutorial/test_cases/q2/CONFIG
Normal file
@ -0,0 +1,2 @@
|
||||
max_points: "1"
|
||||
class: "PassAllTestsQuestion"
|
3
tutorial/test_cases/q2/food_price1.solution
Normal file
3
tutorial/test_cases/q2/food_price1.solution
Normal file
@ -0,0 +1,3 @@
|
||||
# This is the solution file for test_cases/q2/food_price1.test.
|
||||
# The result of evaluating the test must equal the below when cast to a string.
|
||||
result: "12.25"
|
7
tutorial/test_cases/q2/food_price1.test
Normal file
7
tutorial/test_cases/q2/food_price1.test
Normal file
@ -0,0 +1,7 @@
|
||||
class: "EvalTest"
|
||||
success: "buyLotsOfFruit correctly computes the cost of the order"
|
||||
failure: "buyLotsOfFruit must compute the correct cost of the order"
|
||||
|
||||
# A python expression to be evaluated. This expression must return the
|
||||
# same result for the student and instructor's code.
|
||||
test: "buyLotsOfFruit.buyLotsOfFruit([ ('apples', 2.0), ('pears',3.0), ('limes',4.0) ])"
|
3
tutorial/test_cases/q2/food_price2.solution
Normal file
3
tutorial/test_cases/q2/food_price2.solution
Normal file
@ -0,0 +1,3 @@
|
||||
# This is the solution file for test_cases/q2/food_price2.test.
|
||||
# The result of evaluating the test must equal the below when cast to a string.
|
||||
result: "14.75"
|
7
tutorial/test_cases/q2/food_price2.test
Normal file
7
tutorial/test_cases/q2/food_price2.test
Normal file
@ -0,0 +1,7 @@
|
||||
class: "EvalTest"
|
||||
success: "buyLotsOfFruit correctly computes the cost of the order"
|
||||
failure: "buyLotsOfFruit must compute the correct cost of the order"
|
||||
|
||||
# A python expression to be evaluated. This expression must return the
|
||||
# same result for the student and instructor's code.
|
||||
test: "buyLotsOfFruit.buyLotsOfFruit([ ('apples', 4.0), ('pears',3.0), ('limes',2.0) ])"
|
3
tutorial/test_cases/q2/food_price3.solution
Normal file
3
tutorial/test_cases/q2/food_price3.solution
Normal file
@ -0,0 +1,3 @@
|
||||
# This is the solution file for test_cases/q2/food_price3.test.
|
||||
# The result of evaluating the test must equal the below when cast to a string.
|
||||
result: "6.4375"
|
7
tutorial/test_cases/q2/food_price3.test
Normal file
7
tutorial/test_cases/q2/food_price3.test
Normal file
@ -0,0 +1,7 @@
|
||||
class: "EvalTest"
|
||||
success: "buyLotsOfFruit correctly computes the cost of the order"
|
||||
failure: "buyLotsOfFruit must compute the correct cost of the order"
|
||||
|
||||
# A python expression to be evaluated. This expression must return the
|
||||
# same result for the student and instructor's code.
|
||||
test: "buyLotsOfFruit.buyLotsOfFruit([ ('apples', 1.25), ('pears',1.50), ('limes',1.75) ])"
|
2
tutorial/test_cases/q3/CONFIG
Normal file
2
tutorial/test_cases/q3/CONFIG
Normal file
@ -0,0 +1,2 @@
|
||||
max_points: "1"
|
||||
class: "PassAllTestsQuestion"
|
3
tutorial/test_cases/q3/select_shop1.solution
Normal file
3
tutorial/test_cases/q3/select_shop1.solution
Normal file
@ -0,0 +1,3 @@
|
||||
# This is the solution file for test_cases/q3/select_shop1.test.
|
||||
# The result of evaluating the test must equal the below when cast to a string.
|
||||
result: "<FruitShop: shop1>"
|
21
tutorial/test_cases/q3/select_shop1.test
Normal file
21
tutorial/test_cases/q3/select_shop1.test
Normal file
@ -0,0 +1,21 @@
|
||||
class: "EvalTest"
|
||||
success: "shopSmart(order, shops) selects the cheapest shop"
|
||||
failure: "shopSmart(order, shops) must select the cheapest shop"
|
||||
|
||||
# Python statements initializing variables for the test below.
|
||||
preamble: """
|
||||
import shop
|
||||
|
||||
dir1 = {'apples': 2.0, 'oranges':1.0}
|
||||
shop1 = shop.FruitShop('shop1',dir1)
|
||||
dir2 = {'apples': 1.0, 'oranges': 5.0}
|
||||
shop2 = shop.FruitShop('shop2',dir2)
|
||||
shops = [shop1, shop2]
|
||||
|
||||
order = [('apples',1.0), ('oranges',3.0)]
|
||||
ans = shopSmart.shopSmart(order, shops)
|
||||
"""
|
||||
|
||||
# A python expression to be evaluated. This expression must return the
|
||||
# same result for the student and instructor's code.
|
||||
test: "ans"
|
3
tutorial/test_cases/q3/select_shop2.solution
Normal file
3
tutorial/test_cases/q3/select_shop2.solution
Normal file
@ -0,0 +1,3 @@
|
||||
# This is the solution file for test_cases/q3/select_shop2.test.
|
||||
# The result of evaluating the test must equal the below when cast to a string.
|
||||
result: "<FruitShop: shop2>"
|
21
tutorial/test_cases/q3/select_shop2.test
Normal file
21
tutorial/test_cases/q3/select_shop2.test
Normal file
@ -0,0 +1,21 @@
|
||||
class: "EvalTest"
|
||||
success: "shopSmart(order, shops) selects the cheapest shop"
|
||||
failure: "shopSmart(order, shops) must select the cheapest shop"
|
||||
|
||||
# Python statements initializing variables for the test below.
|
||||
preamble: """
|
||||
import shop
|
||||
|
||||
dir1 = {'apples': 2.0, 'oranges':1.0}
|
||||
shop1 = shop.FruitShop('shop1',dir1)
|
||||
dir2 = {'apples': 1.0, 'oranges': 5.0}
|
||||
shop2 = shop.FruitShop('shop2',dir2)
|
||||
shops = [shop1, shop2]
|
||||
|
||||
order = [('apples',3.0)]
|
||||
ans = shopSmart.shopSmart(order, shops)
|
||||
"""
|
||||
|
||||
# A python expression to be evaluated. This expression must return the
|
||||
# same result for the student and instructor's code.
|
||||
test: "ans"
|
3
tutorial/test_cases/q3/select_shop3.solution
Normal file
3
tutorial/test_cases/q3/select_shop3.solution
Normal file
@ -0,0 +1,3 @@
|
||||
# This is the solution file for test_cases/q3/select_shop3.test.
|
||||
# The result of evaluating the test must equal the below when cast to a string.
|
||||
result: "<FruitShop: shop3>"
|
23
tutorial/test_cases/q3/select_shop3.test
Normal file
23
tutorial/test_cases/q3/select_shop3.test
Normal file
@ -0,0 +1,23 @@
|
||||
class: "EvalTest"
|
||||
success: "shopSmart(order, shops) selects the cheapest shop"
|
||||
failure: "shopSmart(order, shops) must select the cheapest shop"
|
||||
|
||||
# Python statements initializing variables for the test below.
|
||||
preamble: """
|
||||
import shop
|
||||
|
||||
dir1 = {'apples': 2.0, 'oranges':1.0}
|
||||
shop1 = shop.FruitShop('shop1',dir1)
|
||||
dir2 = {'apples': 1.0, 'oranges': 5.0}
|
||||
shop2 = shop.FruitShop('shop2',dir2)
|
||||
dir3 = {'apples': 1.5, 'oranges': 2.0}
|
||||
shop3 = shop.FruitShop('shop3',dir3)
|
||||
shops = [shop1, shop2, shop3]
|
||||
|
||||
order = [('apples',10.0), ('oranges',3.0)]
|
||||
ans = shopSmart.shopSmart(order, shops)
|
||||
"""
|
||||
|
||||
# A python expression to be evaluated. This expression must return the
|
||||
# same result for the student and instructor's code.
|
||||
test: "ans"
|
85
tutorial/textDisplay.py
Normal file
85
tutorial/textDisplay.py
Normal file
@ -0,0 +1,85 @@
|
||||
# textDisplay.py
|
||||
# --------------
|
||||
# Licensing Information: You are free to use or extend these projects for
|
||||
# educational purposes provided that (1) you do not distribute or publish
|
||||
# solutions, (2) you retain this notice, and (3) you provide clear
|
||||
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
|
||||
#
|
||||
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
|
||||
# The core projects and autograders were primarily created by John DeNero
|
||||
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
|
||||
# Student side autograding was added by Brad Miller, Nick Hay, and
|
||||
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
|
||||
|
||||
from __future__ import print_function
|
||||
import time
|
||||
|
||||
try:
|
||||
import pacman
|
||||
except:
|
||||
pass
|
||||
|
||||
DRAW_EVERY = 1
|
||||
SLEEP_TIME = 0 # This can be overwritten by __init__
|
||||
DISPLAY_MOVES = False
|
||||
QUIET = False # Supresses output
|
||||
|
||||
|
||||
class NullGraphics:
    """Display stub that satisfies the graphics interface while drawing nothing."""

    def initialize(self, state, isBlue=False):
        pass

    def update(self, state):
        pass

    def checkNullDisplay(self):
        # Lets callers detect that no real display is attached.
        return True

    def pause(self):
        time.sleep(SLEEP_TIME)

    def draw(self, state):
        print(state)

    def updateDistributions(self, dist):
        pass

    def finish(self):
        pass
|
||||
|
||||
|
||||
class PacmanGraphics:
    """Text-mode display: prints the game state every DRAW_EVERY full turns."""

    def __init__(self, speed=None):
        if speed is not None:
            # Override the module-level pause length.
            global SLEEP_TIME
            SLEEP_TIME = speed

    def initialize(self, state, isBlue=False):
        self.draw(state)
        self.pause()
        self.turn = 0
        self.agentCounter = 0

    def update(self, state):
        numAgents = len(state.agentStates)
        self.agentCounter = (self.agentCounter + 1) % numAgents
        # A full turn completes once every agent has moved.
        if self.agentCounter == 0:
            self.turn += 1
            if DISPLAY_MOVES:
                ghosts = [pacman.nearestPoint(state.getGhostPosition(i)) for i in range(1, numAgents)]
                print("%4d) P: %-8s" % (self.turn, str(pacman.nearestPoint(state.getPacmanPosition()))),
                      '| Score: %-5d' % state.score, '| Ghosts:', ghosts)
            if self.turn % DRAW_EVERY == 0:
                self.draw(state)
                self.pause()
        if state._win or state._lose:
            self.draw(state)

    def pause(self):
        time.sleep(SLEEP_TIME)

    def draw(self, state):
        print(state)

    def finish(self):
        pass
|
105
tutorial/town.py
Normal file
105
tutorial/town.py
Normal file
@ -0,0 +1,105 @@
|
||||
# town.py
|
||||
# -------
|
||||
# Licensing Information: You are free to use or extend these projects for
|
||||
# educational purposes provided that (1) you do not distribute or publish
|
||||
# solutions, (2) you retain this notice, and (3) you provide clear
|
||||
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
|
||||
#
|
||||
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
|
||||
# The core projects and autograders were primarily created by John DeNero
|
||||
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
|
||||
# Student side autograding was added by Brad Miller, Nick Hay, and
|
||||
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
|
||||
|
||||
|
||||
import shop
|
||||
|
||||
|
||||
class Town:
    """A collection of FruitShops plus a symmetric table of travel distances.

    Distances are keyed by unordered pairs of place names ('home' or shop
    names); getDistance() checks both orientations of the pair.
    """

    def __init__(self, shops, distances):
        """
        shops: List of FruitShop objects

        distances: Dictionary with keys as pairs (tuples) of names of places
        ('home' or name strings of FruitShops) and numbers for values which
        represent the distance between the two places in miles, e.g.
        {('home','shop1') : 1, ('home','shop2') : 1, ('shop1','shop2') : 2}
        """
        self.shops = shops
        self.distances = distances

    def getFruitCostPerPoundOnRoute(self, fruit, route):
        """
        fruit: Fruit string

        route: List of shop names
        Returns the best cost per pound of 'fruit' at any of the shops along
        the route. If none of the shops carry 'fruit', returns None
        """
        routeShops = [shop for shop in self.shops if shop.getName() in route]
        costs = []
        for shop in routeShops:
            cost = shop.getCostPerPound(fruit)
            if cost is not None:
                costs.append(cost)
        if not costs:
            # None of the shops carry this fruit
            return None
        return min(costs)

    def allFruitsCarriedAtShops(self, orderList, shops):
        """
        orderList: List of (fruit, numPounds) tuples

        shops: List of shop names
        Returns whether all fruit in the order list can be purchased at at least
        one of these shops.
        """
        return None not in [self.getFruitCostPerPoundOnRoute(fruit, shops)
                            for fruit, _ in orderList]

    def getDistance(self, loc1, loc2):
        """
        loc1: A name of a place ('home' or the name of a FruitShop in town)

        loc2: A name of a place ('home' or the name of a FruitShop in town)
        Returns the distance between these two places in this town.
        """
        # The distance table stores each unordered pair once; try both orders.
        if (loc1, loc2) in self.distances:
            return self.distances[(loc1, loc2)]
        return self.distances[(loc2, loc1)]

    def getTotalDistanceOnRoute(self, route):
        """
        route: List of shop names
        Returns the total distance traveled by starting at 'home', going to
        each shop on the route in order, then returning to 'home'
        """
        if not route:
            return 0
        totalDistance = self.getDistance('home', route[0])
        # BUG FIX: 'xrange' does not exist in Python 3; use 'range'.
        for i in range(len(route) - 1):
            totalDistance += self.getDistance(route[i], route[i + 1])
        totalDistance += self.getDistance(route[-1], 'home')
        return totalDistance

    def getPriceOfOrderOnRoute(self, orderList, route, gasCost):
        """
        orderList: List of (fruit, numPounds) tuples

        route: List of shop names

        gasCost: A number representing the cost of driving 1 mile
        Returns cost of orderList on this route. If any fruit are not available
        on this route, returns None.
        """
        # NOTE(review): despite the docstring, fruit unavailable on the route
        # is silently skipped instead of producing None; kept as-is because
        # the autograder may rely on it -- confirm intended behavior.
        totalCost = self.getTotalDistanceOnRoute(route) * gasCost
        for fruit, numPounds in orderList:
            costPerPound = self.getFruitCostPerPoundOnRoute(fruit, route)
            if costPerPound is not None:
                totalCost += numPounds * costPerPound
        return totalCost

    def getShops(self):
        """Returns the list of FruitShop objects in this town."""
        return self.shops
|
57
tutorial/tutorialTestClasses.py
Normal file
57
tutorial/tutorialTestClasses.py
Normal file
@ -0,0 +1,57 @@
|
||||
# tutorialTestClasses.py
|
||||
# ----------------------
|
||||
# Licensing Information: You are free to use or extend these projects for
|
||||
# educational purposes provided that (1) you do not distribute or publish
|
||||
# solutions, (2) you retain this notice, and (3) you provide clear
|
||||
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
|
||||
#
|
||||
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
|
||||
# The core projects and autograders were primarily created by John DeNero
|
||||
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
|
||||
# Student side autograding was added by Brad Miller, Nick Hay, and
|
||||
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
|
||||
|
||||
|
||||
import testClasses
|
||||
|
||||
|
||||
# Simple test case which evals an arbitrary piece of python code.
|
||||
# The test is correct if the output of the code given the student's
|
||||
# solution matches that of the instructor's.
|
||||
# Simple test case which evals an arbitrary piece of python code.
# The test is correct if the output of the code given the student's
# solution matches that of the instructor's.
class EvalTest(testClasses.TestCase):
    """Autograder test that runs a compiled preamble (exec) then evaluates a
    test expression (eval) against the student's modules, comparing the
    stringified result with the recorded instructor solution."""

    def __init__(self, question, testDict):
        super(EvalTest, self).__init__(question, testDict)
        # Pre-compile so tracebacks point at a readable pseudo-filename
        # derived from the test path.
        self.preamble = compile(testDict.get('preamble', ""), "%s.preamble" % self.getPath(), 'exec')
        self.test = compile(testDict['test'], "%s.test" % self.getPath(), 'eval')
        self.success = testDict['success']
        self.failure = testDict['failure']

    def evalCode(self, moduleDict):
        """Run the preamble and evaluate the test expression; returns the
        result cast to a string.

        NOTE: exec/eval of instructor-authored test files is intentional
        here; never feed untrusted input through this path.
        """
        bindings = dict(moduleDict)
        exec(self.preamble, bindings)
        return str(eval(self.test, bindings))

    def execute(self, grades, moduleDict, solutionDict):
        """Grade this test: PASS iff the student's result string equals the
        solution's 'result' entry; messages go through 'grades'."""
        result = self.evalCode(moduleDict)
        if result == solutionDict['result']:
            grades.addMessage('PASS: %s' % self.path)
            grades.addMessage('\t%s' % self.success)
            return True
        else:
            grades.addMessage('FAIL: %s' % self.path)
            grades.addMessage('\t%s' % self.failure)
            grades.addMessage('\tstudent result: "%s"' % result)
            grades.addMessage('\tcorrect result: "%s"' % solutionDict['result'])

            return False

    def writeSolution(self, moduleDict, filePath):
        """Record the instructor's result for this test into 'filePath'."""
        # BUG FIX: use a context manager so the handle is closed even if
        # evalCode() raises mid-write (the original leaked it).
        with open(filePath, 'w') as handle:
            handle.write('# This is the solution file for %s.\n' % self.path)
            handle.write('# The result of evaluating the test must equal the below when cast to a string.\n')

            handle.write('result: "%s"\n' % self.evalCode(moduleDict))
        return True
|
696
tutorial/util.py
Normal file
696
tutorial/util.py
Normal file
@ -0,0 +1,696 @@
|
||||
# util.py
|
||||
# -------
|
||||
# Licensing Information: You are free to use or extend these projects for
|
||||
# educational purposes provided that (1) you do not distribute or publish
|
||||
# solutions, (2) you retain this notice, and (3) you provide clear
|
||||
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
|
||||
#
|
||||
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
|
||||
# The core projects and autograders were primarily created by John DeNero
|
||||
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
|
||||
# Student side autograding was added by Brad Miller, Nick Hay, and
|
||||
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
|
||||
|
||||
|
||||
# util.py
|
||||
# -------
|
||||
# Licensing Information: You are free to use or extend these projects for
|
||||
# educational purposes provided that (1) you do not distribute or publish
|
||||
# solutions, (2) you retain this notice, and (3) you provide clear
|
||||
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
|
||||
#
|
||||
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
|
||||
# The core projects and autograders were primarily created by John DeNero
|
||||
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
|
||||
# Student side autograding was added by Brad Miller, Nick Hay, and
|
||||
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
|
||||
|
||||
|
||||
import sys
|
||||
import inspect
|
||||
import heapq, random
|
||||
|
||||
|
||||
# import cStringIO
|
||||
|
||||
|
||||
class FixedRandom:
    # Wraps a random.Random instance whose Mersenne-Twister state is pinned
    # to a hard-coded constant, so every FixedRandom yields the same
    # deterministic sequence (keeps autograding reproducible).
    def __init__(self):
        # (version, internal state tuple, gauss_next) in the format accepted
        # by random.Random.setstate(); version 3 is the CPython format.
        fixedState = (3, (2147483648, 507801126, 683453281, 310439348, 2597246090,
            2209084787, 2267831527, 979920060, 3098657677, 37650879, 807947081, 3974896263,
            881243242, 3100634921, 1334775171, 3965168385, 746264660, 4074750168, 500078808,
            776561771, 702988163, 1636311725, 2559226045, 157578202, 2498342920, 2794591496,
            4130598723, 496985844, 2944563015, 3731321600, 3514814613, 3362575829, 3038768745,
            2206497038, 1108748846, 1317460727, 3134077628, 988312410, 1674063516, 746456451,
            3958482413, 1857117812, 708750586, 1583423339, 3466495450, 1536929345, 1137240525,
            3875025632, 2466137587, 1235845595, 4214575620, 3792516855, 657994358, 1241843248,
            1695651859, 3678946666, 1929922113, 2351044952, 2317810202, 2039319015, 460787996, 3654096216,
            4068721415, 1814163703, 2904112444, 1386111013, 574629867, 2654529343, 3833135042, 2725328455,
            552431551, 4006991378, 1331562057, 3710134542, 303171486, 1203231078, 2670768975, 54570816,
            2679609001, 578983064, 1271454725, 3230871056, 2496832891, 2944938195, 1608828728, 367886575,
            2544708204, 103775539, 1912402393, 1098482180, 2738577070, 3091646463, 1505274463, 2079416566,
            659100352, 839995305, 1696257633, 274389836, 3973303017, 671127655, 1061109122, 517486945,
            1379749962, 3421383928, 3116950429, 2165882425, 2346928266, 2892678711, 2936066049,
            1316407868, 2873411858, 4279682888, 2744351923, 3290373816, 1014377279, 955200944, 4220990860,
            2386098930, 1772997650, 3757346974, 1621616438, 2877097197, 442116595, 2010480266, 2867861469,
            2955352695, 605335967, 2222936009, 2067554933, 4129906358, 1519608541, 1195006590, 1942991038,
            2736562236, 279162408, 1415982909, 4099901426, 1732201505, 2934657937, 860563237, 2479235483,
            3081651097, 2244720867, 3112631622, 1636991639, 3860393305, 2312061927, 48780114, 1149090394,
            2643246550, 1764050647, 3836789087, 3474859076, 4237194338, 1735191073, 2150369208, 92164394,
            756974036, 2314453957, 323969533, 4267621035, 283649842, 810004843, 727855536, 1757827251,
            3334960421, 3261035106, 38417393, 2660980472, 1256633965, 2184045390, 811213141, 2857482069,
            2237770878, 3891003138, 2787806886, 2435192790, 2249324662, 3507764896, 995388363, 856944153,
            619213904, 3233967826, 3703465555, 3286531781, 3863193356, 2992340714, 413696855, 3865185632,
            1704163171, 3043634452, 2225424707, 2199018022, 3506117517, 3311559776, 3374443561,
            1207829628, 668793165, 1822020716, 2082656160, 1160606415, 3034757648, 741703672, 3094328738,
            459332691, 2702383376, 1610239915, 4162939394, 557861574, 3805706338, 3832520705, 1248934879,
            3250424034, 892335058, 74323433, 3209751608, 3213220797, 3444035873, 3743886725, 1783837251,
            610968664, 580745246, 4041979504, 201684874, 2673219253, 1377283008, 3497299167, 2344209394,
            2304982920, 3081403782, 2599256854, 3184475235, 3373055826, 695186388, 2423332338, 222864327,
            1258227992, 3627871647, 3487724980, 4027953808, 3053320360, 533627073, 3026232514, 2340271949,
            867277230, 868513116, 2158535651, 2487822909, 3428235761, 3067196046, 3435119657, 1908441839,
            788668797, 3367703138, 3317763187, 908264443, 2252100381, 764223334, 4127108988, 384641349,
            3377374722, 1263833251, 1958694944, 3847832657, 1253909612, 1096494446, 555725445, 2277045895,
            3340096504, 1383318686, 4234428127, 1072582179, 94169494, 1064509968, 2681151917, 2681864920,
            734708852, 1338914021, 1270409500, 1789469116, 4191988204, 1716329784, 2213764829, 3712538840,
            919910444, 1318414447, 3383806712, 3054941722, 3378649942, 1205735655, 1268136494, 2214009444,
            2532395133, 3232230447, 230294038, 342599089, 772808141, 4096882234, 3146662953, 2784264306,
            1860954704, 2675279609, 2984212876, 2466966981, 2627986059, 2985545332, 2578042598,
            1458940786, 2944243755, 3959506256, 1509151382, 325761900, 942251521, 4184289782, 2756231555,
            3297811774, 1169708099, 3280524138, 3805245319, 3227360276, 3199632491, 2235795585,
            2865407118, 36763651, 2441503575, 3314890374, 1755526087, 17915536, 1196948233, 949343045,
            3815841867, 489007833, 2654997597, 2834744136, 417688687, 2843220846, 85621843, 747339336,
            2043645709, 3520444394, 1825470818, 647778910, 275904777, 1249389189, 3640887431, 4200779599,
            323384601, 3446088641, 4049835786, 1718989062, 3563787136, 44099190, 3281263107, 22910812,
            1826109246, 745118154, 3392171319, 1571490704, 354891067, 815955642, 1453450421, 940015623,
            796817754, 1260148619, 3898237757, 176670141, 1870249326, 3317738680, 448918002, 4059166594,
            2003827551, 987091377, 224855998, 3520570137, 789522610, 2604445123, 454472869, 475688926,
            2990723466, 523362238, 3897608102, 806637149, 2642229586, 2928614432, 1564415411, 1691381054,
            3816907227, 4082581003, 1895544448, 3728217394, 3214813157, 4054301607, 1882632454,
            2873728645, 3694943071, 1297991732, 2101682438, 3952579552, 678650400, 1391722293, 478833748,
            2976468591, 158586606, 2576499787, 662690848, 3799889765, 3328894692, 2474578497, 2383901391,
            1718193504, 3003184595, 3630561213, 1929441113, 3848238627, 1594310094, 3040359840,
            3051803867, 2462788790, 954409915, 802581771, 681703307, 545982392, 2738993819, 8025358,
            2827719383, 770471093, 3484895980, 3111306320, 3900000891, 2116916652, 397746721, 2087689510,
            721433935, 1396088885, 2751612384, 1998988613, 2135074843, 2521131298, 707009172, 2398321482,
            688041159, 2264560137, 482388305, 207864885, 3735036991, 3490348331, 1963642811, 3260224305,
            3493564223, 1939428454, 1128799656, 1366012432, 2858822447, 1428147157, 2261125391,
            1611208390, 1134826333, 2374102525, 3833625209, 2266397263, 3189115077, 770080230, 2674657172,
            4280146640, 3604531615, 4235071805, 3436987249, 509704467, 2582695198, 4256268040, 3391197562,
            1460642842, 1617931012, 457825497, 1031452907, 1330422862, 4125947620, 2280712485, 431892090,
            2387410588, 2061126784, 896457479, 3480499461, 2488196663, 4021103792, 1877063114, 2744470201,
            1046140599, 2129952955, 3583049218, 4217723693, 2720341743, 820661843, 1079873609, 3360954200,
            3652304997, 3335838575, 2178810636, 1908053374, 4026721976, 1793145418, 476541615, 973420250,
            515553040, 919292001, 2601786155, 1685119450, 3030170809, 1590676150, 1665099167, 651151584,
            2077190587, 957892642, 646336572, 2743719258, 866169074, 851118829, 4225766285, 963748226,
            799549420, 1955032629, 799460000, 2425744063, 2441291571, 1928963772, 528930629, 2591962884,
            3495142819, 1896021824, 901320159, 3181820243, 843061941, 3338628510, 3782438992, 9515330,
            1705797226, 953535929, 764833876, 3202464965, 2970244591, 519154982, 3390617541, 566616744,
            3438031503, 1853838297, 170608755, 1393728434, 676900116, 3184965776, 1843100290, 78995357,
            2227939888, 3460264600, 1745705055, 1474086965, 572796246, 4081303004, 882828851, 1295445825,
            137639900, 3304579600, 2722437017, 4093422709, 273203373, 2666507854, 3998836510, 493829981,
            1623949669, 3482036755, 3390023939, 833233937, 1639668730, 1499455075, 249728260, 1210694006,
            3836497489, 1551488720, 3253074267, 3388238003, 2372035079, 3945715164, 2029501215,
            3362012634, 2007375355, 4074709820, 631485888, 3135015769, 4273087084, 3648076204, 2739943601,
            1374020358, 1760722448, 3773939706, 1313027823, 1895251226, 4224465911, 421382535, 1141067370,
            3660034846, 3393185650, 1850995280, 1451917312, 3841455409, 3926840308, 1397397252,
            2572864479, 2500171350, 3119920613, 531400869, 1626487579, 1099320497, 407414753, 2438623324,
            99073255, 3175491512, 656431560, 1153671785, 236307875, 2824738046, 2320621382, 892174056,
            230984053, 719791226, 2718891946, 624), None)
        self.random = random.Random()
        self.random.setstate(fixedState)
|
||||
|
||||
|
||||
"""
|
||||
Data structures useful for implementing SearchAgents
|
||||
"""
|
||||
|
||||
|
||||
class Stack:
    """Last-in-first-out (LIFO) container backed by a Python list."""

    def __init__(self):
        self.list = []

    def push(self, item):
        """Place 'item' on top of the stack."""
        self.list.append(item)

    def pop(self):
        """Remove and return the most recently pushed item."""
        return self.list.pop()

    def isEmpty(self):
        """Return True when the stack holds no items."""
        return not self.list
|
||||
|
||||
|
||||
class Queue:
    """First-in-first-out (FIFO) container backed by a Python list.

    New items go to the front of the underlying list and are removed from
    the back, so the list is ordered newest-first.
    """

    def __init__(self):
        self.list = []

    def push(self, item):
        """Enqueue 'item' at the back of the queue."""
        self.list.insert(0, item)

    def pop(self):
        """Dequeue and return the earliest enqueued item still present."""
        return self.list.pop()

    def isEmpty(self):
        """Return True when the queue holds no items."""
        return not self.list
|
||||
|
||||
|
||||
class PriorityQueue:
    """
    Min-priority queue built on a binary heap: O(1) peek at the
    lowest-priority item, logarithmic push/pop.

    Entries are (priority, insertion_counter, item) tuples; the counter
    breaks priority ties in insertion order and keeps heapq from ever
    comparing the items themselves.
    """

    def __init__(self):
        self.heap = []
        self.count = 0

    def push(self, item, priority):
        """Insert 'item' with the given priority."""
        heapq.heappush(self.heap, (priority, self.count, item))
        self.count += 1

    def pop(self):
        """Remove and return the item with the lowest priority."""
        priority, tiebreak, item = heapq.heappop(self.heap)
        return item

    def isEmpty(self):
        """Return True when no items remain."""
        return not self.heap

    def update(self, item, priority):
        """
        If 'item' is queued with a higher priority, lower it to 'priority'
        and restore the heap; if queued with an equal or lower priority, do
        nothing; if absent, push it as a new entry. Linear scan of the heap.
        """
        for index, (oldPriority, counter, queued) in enumerate(self.heap):
            if queued == item:
                if oldPriority <= priority:
                    break
                del self.heap[index]
                self.heap.append((priority, counter, item))
                heapq.heapify(self.heap)
                break
        else:
            self.push(item, priority)
|
||||
|
||||
|
||||
class PriorityQueueWithFunction(PriorityQueue):
    """
    Priority queue sharing the one-argument push/pop signature of Queue and
    Stack, so it can drop in for either. The caller supplies a priority
    function that derives each item's priority on push.
    """

    def __init__(self, priorityFunction):
        """priorityFunction (item) -> priority"""
        PriorityQueue.__init__(self)
        self.priorityFunction = priorityFunction

    def push(self, item):
        """Insert 'item' using the priority computed by priorityFunction."""
        PriorityQueue.push(self, item, self.priorityFunction(item))
|
||||
|
||||
|
||||
def manhattanDistance(xy1, xy2):
    """Return the Manhattan (L1) distance between points xy1 and xy2."""
    dx = xy1[0] - xy2[0]
    dy = xy1[1] - xy2[1]
    return abs(dx) + abs(dy)
|
||||
|
||||
|
||||
"""
|
||||
Data structures and functions useful for various course projects
|
||||
|
||||
The search project should not need anything below this line.
|
||||
"""
|
||||
|
||||
|
||||
class Counter(dict):
|
||||
"""
|
||||
A counter keeps track of counts for a set of keys.
|
||||
|
||||
The counter class is an extension of the standard python
|
||||
dictionary type. It is specialized to have number values
|
||||
(integers or floats), and includes a handful of additional
|
||||
functions to ease the task of counting data. In particular,
|
||||
all keys are defaulted to have value 0. Using a dictionary:
|
||||
|
||||
a = {}
|
||||
print(a['test'])
|
||||
|
||||
would give an error, while the Counter class analogue:
|
||||
|
||||
>>> a = Counter()
|
||||
>>> print(a['test'])
|
||||
0
|
||||
|
||||
returns the default 0 value. Note that to reference a key
|
||||
that you know is contained in the counter,
|
||||
you can still use the dictionary syntax:
|
||||
|
||||
>>> a = Counter()
|
||||
>>> a['test'] = 2
|
||||
>>> print(a['test'])
|
||||
2
|
||||
|
||||
This is very useful for counting things without initializing their counts,
|
||||
see for example:
|
||||
|
||||
>>> a['blah'] += 1
|
||||
>>> print(a['blah'])
|
||||
1
|
||||
|
||||
The counter also includes additional functionality useful in implementing
|
||||
the classifiers for this assignment. Two counters can be added,
|
||||
subtracted or multiplied together. See below for details. They can
|
||||
also be normalized and their total count and arg max can be extracted.
|
||||
"""
|
||||
|
||||
def __getitem__(self, idx):
|
||||
self.setdefault(idx, 0)
|
||||
return dict.__getitem__(self, idx)
|
||||
|
||||
def incrementAll(self, keys, count):
|
||||
"""
|
||||
Increments all elements of keys by the same count.
|
||||
|
||||
>>> a = Counter()
|
||||
>>> a.incrementAll(['one','two', 'three'], 1)
|
||||
>>> a['one']
|
||||
1
|
||||
>>> a['two']
|
||||
1
|
||||
"""
|
||||
for key in keys:
|
||||
self[key] += count
|
||||
|
||||
def argMax(self):
|
||||
"""
|
||||
Returns the key with the highest value.
|
||||
"""
|
||||
if len(self.keys()) == 0: return None
|
||||
all = self.items()
|
||||
values = [x[1] for x in all]
|
||||
maxIndex = values.index(max(values))
|
||||
return all[maxIndex][0]
|
||||
|
||||
def sortedKeys(self):
|
||||
"""
|
||||
Returns a list of keys sorted by their values. Keys
|
||||
with the highest values will appear first.
|
||||
|
||||
>>> a = Counter()
|
||||
>>> a['first'] = -2
|
||||
>>> a['second'] = 4
|
||||
>>> a['third'] = 1
|
||||
>>> a.sortedKeys()
|
||||
['second', 'third', 'first']
|
||||
"""
|
||||
sortedItems = self.items()
|
||||
compare = lambda x, y: sign(y[1] - x[1])
|
||||
sortedItems.sort(cmp=compare)
|
||||
return [x[0] for x in sortedItems]
|
||||
|
||||
def totalCount(self):
|
||||
"""
|
||||
Returns the sum of counts for all keys.
|
||||
"""
|
||||
return sum(self.values())
|
||||
|
||||
def normalize(self):
|
||||
"""
|
||||
Edits the counter such that the total count of all
|
||||
keys sums to 1. The ratio of counts for all keys
|
||||
will remain the same. Note that normalizing an empty
|
||||
Counter will result in an error.
|
||||
"""
|
||||
total = float(self.totalCount())
|
||||
if total == 0: return
|
||||
for key in self.keys():
|
||||
self[key] = self[key] / total
|
||||
|
||||
def divideAll(self, divisor):
|
||||
"""
|
||||
Divides all counts by divisor
|
||||
"""
|
||||
divisor = float(divisor)
|
||||
for key in self:
|
||||
self[key] /= divisor
|
||||
|
||||
def copy(self):
|
||||
"""
|
||||
Returns a copy of the counter
|
||||
"""
|
||||
return Counter(dict.copy(self))
|
||||
|
||||
def __mul__(self, y):
|
||||
"""
|
||||
Multiplying two counters gives the dot product of their vectors where
|
||||
each unique label is a vector element.
|
||||
|
||||
>>> a = Counter()
|
||||
>>> b = Counter()
|
||||
>>> a['first'] = -2
|
||||
>>> a['second'] = 4
|
||||
>>> b['first'] = 3
|
||||
>>> b['second'] = 5
|
||||
>>> a['third'] = 1.5
|
||||
>>> a['fourth'] = 2.5
|
||||
>>> a * b
|
||||
14
|
||||
"""
|
||||
sum = 0
|
||||
x = self
|
||||
if len(x) > len(y):
|
||||
x, y = y, x
|
||||
for key in x:
|
||||
if key not in y:
|
||||
continue
|
||||
sum += x[key] * y[key]
|
||||
return sum
|
||||
|
||||
def __radd__(self, y):
|
||||
"""
|
||||
Adding another counter to a counter increments the current counter
|
||||
by the values stored in the second counter.
|
||||
|
||||
>>> a = Counter()
|
||||
>>> b = Counter()
|
||||
>>> a['first'] = -2
|
||||
>>> a['second'] = 4
|
||||
>>> b['first'] = 3
|
||||
>>> b['third'] = 1
|
||||
>>> a += b
|
||||
>>> a['first']
|
||||
1
|
||||
"""
|
||||
for key, value in y.items():
|
||||
self[key] += value
|
||||
|
||||
def __add__(self, y):
|
||||
"""
|
||||
Adding two counters gives a counter with the union of all keys and
|
||||
counts of the second added to counts of the first.
|
||||
|
||||
>>> a = Counter()
|
||||
>>> b = Counter()
|
||||
>>> a['first'] = -2
|
||||
>>> a['second'] = 4
|
||||
>>> b['first'] = 3
|
||||
>>> b['third'] = 1
|
||||
>>> (a + b)['first']
|
||||
1
|
||||
"""
|
||||
addend = Counter()
|
||||
for key in self:
|
||||
if key in y:
|
||||
addend[key] = self[key] + y[key]
|
||||
else:
|
||||
addend[key] = self[key]
|
||||
for key in y:
|
||||
if key in self:
|
||||
continue
|
||||
addend[key] = y[key]
|
||||
return addend
|
||||
|
||||
def __sub__(self, y):
|
||||
"""
|
||||
Subtracting a counter from another gives a counter with the union of all keys and
|
||||
counts of the second subtracted from counts of the first.
|
||||
|
||||
>>> a = Counter()
|
||||
>>> b = Counter()
|
||||
>>> a['first'] = -2
|
||||
>>> a['second'] = 4
|
||||
>>> b['first'] = 3
|
||||
>>> b['third'] = 1
|
||||
>>> (a - b)['first']
|
||||
-5
|
||||
"""
|
||||
addend = Counter()
|
||||
for key in self:
|
||||
if key in y:
|
||||
addend[key] = self[key] - y[key]
|
||||
else:
|
||||
addend[key] = self[key]
|
||||
for key in y:
|
||||
if key in self:
|
||||
continue
|
||||
addend[key] = -1 * y[key]
|
||||
return addend
|
||||
|
||||
|
||||
def raiseNotDefined():
    """Report the caller's location as unimplemented and abort the run."""
    caller = inspect.stack()[1]
    fileName, line, method = caller[1], caller[2], caller[3]

    print("*** Method not implemented: %s at line %s of %s" % (method, line, fileName))
    sys.exit(1)
|
||||
|
||||
|
||||
def normalize(vectorOrCounter):
    """
    Normalize a vector or counter by dividing each value by the sum of all
    values. Counters are returned as a new Counter (the input is untouched);
    plain sequences come back as a new list. Zero-total inputs are returned
    unchanged.
    """
    result = Counter()
    if type(vectorOrCounter) == type(result):
        total = float(vectorOrCounter.totalCount())
        if total == 0:
            return vectorOrCounter
        for key in vectorOrCounter.keys():
            result[key] = vectorOrCounter[key] / total
        return result
    total = float(sum(vectorOrCounter))
    if total == 0:
        return vectorOrCounter
    return [value / total for value in vectorOrCounter]
|
||||
|
||||
|
||||
def nSample(distribution, values, n):
    """
    Draw n samples from a discrete distribution in one pass by walking a
    sorted batch of uniforms along the cumulative distribution.
    """
    if sum(distribution) != 1:
        distribution = normalize(distribution)
    uniforms = sorted(random.random() for _ in range(n))
    samples = []
    samplePos, distPos, cdf = 0, 0, distribution[0]
    while samplePos < n:
        if uniforms[samplePos] < cdf:
            samples.append(values[distPos])
            samplePos += 1
        else:
            distPos += 1
            cdf += distribution[distPos]
    return samples
|
||||
|
||||
|
||||
def sample(distribution, values=None):
    """
    Draw one value from a discrete distribution. Accepts either a Counter
    (values are its keys) or a parallel (distribution, values) pair;
    un-normalized weights are normalized first.
    """
    if type(distribution) == Counter:
        items = sorted(distribution.items())
        distribution = [pair[1] for pair in items]
        values = [pair[0] for pair in items]
    if sum(distribution) != 1:
        distribution = normalize(distribution)
    threshold = random.random()
    index, cumulative = 0, distribution[0]
    while threshold > cumulative:
        index += 1
        cumulative += distribution[index]
    return values[index]
|
||||
|
||||
|
||||
def sampleFromCounter(ctr):
    """Draw one key from a Counter, weighted by its counts."""
    items = sorted(ctr.items())
    return sample([value for _, value in items], [key for key, _ in items])
|
||||
|
||||
|
||||
def getProbability(value, distribution, values):
    """
    Gives the probability of a value under a discrete distribution
    defined by (distributions, values).
    """
    # Sum the mass attached to every occurrence of 'value'; the explicit
    # 0.0 start keeps the result a float even when nothing matches.
    return sum((prob for prob, val in zip(distribution, values) if val == value), 0.0)
|
||||
|
||||
|
||||
def flipCoin(p):
    """Bernoulli trial: return True with probability p."""
    return random.random() < p
|
||||
|
||||
|
||||
def chooseFromDistribution(distribution):
    """Takes either a counter or a list of (prob, key) pairs and samples."""
    if type(distribution) == dict or type(distribution) == Counter:
        return sample(distribution)
    threshold = random.random()
    cumulative = 0.0
    for prob, element in distribution:
        cumulative += prob
        if threshold <= cumulative:
            return element
|
||||
|
||||
|
||||
def nearestPoint(pos):
    """
    Finds the nearest grid point to a position (discretizes).
    """
    row, col = pos
    # Add 0.5 then truncate with int(): reproduces the original rounding,
    # including truncation toward zero for negative coordinates.
    return (int(row + 0.5), int(col + 0.5))
|
||||
|
||||
|
||||
def sign(x):
    """
    Returns 1 or -1 depending on the sign of x (zero counts as positive).
    """
    return 1 if x >= 0 else -1
|
||||
|
||||
|
||||
def arrayInvert(array):
    """
    Inverts (transposes) a rectangular matrix stored as a list of lists.
    """
    # BUG FIX: the original sized the result by the number of *rows*
    # (result = [[] for i in array]) and then indexed it by column index,
    # raising IndexError whenever cols > rows. zip(*array) transposes any
    # rectangular matrix. NOTE(review): assumes rows are equal length --
    # zip silently truncates ragged input.
    return [list(column) for column in zip(*array)]
|
||||
|
||||
|
||||
def matrixAsList(matrix, value=True):
    """
    Turns a matrix into a list of coordinates matching the specified value
    """
    rows, cols = len(matrix), len(matrix[0])
    return [(row, col)
            for row in range(rows)
            for col in range(cols)
            if matrix[row][col] == value]
|
||||
|
||||
|
||||
def lookup(name, namespace):
    """
    Get a method or class from any imported module from its name.
    Usage: lookup(functionName, globals())
    """
    if '.' in name:
        # Dotted name: import the module part, then pull the attribute off
        # the top-level package (same resolution as the original).
        moduleName, objName = name.rsplit('.', 1)
        module = __import__(moduleName)
        return getattr(module, objName)
    else:
        # BUG FIX (Python 3): the original compared str(type(obj)) against
        # "<type 'module'>", the Python 2 repr, so modules never matched.
        modules = [obj for obj in namespace.values() if inspect.ismodule(obj)]
        options = [getattr(module, name) for module in modules if name in dir(module)]
        options += [obj[1] for obj in namespace.items() if obj[0] == name]
        if len(options) == 1: return options[0]
        if len(options) > 1:
            # BUG FIX: the original never interpolated the name into the message.
            raise Exception('Name conflict for %s' % name)
        raise Exception('%s not found as a method or class' % name)
|
||||
|
||||
|
||||
def pause():
    """
    Pauses the output stream awaiting user feedback.
    """
    print("<Press enter/return to continue>")
    # BUG FIX: raw_input() was removed in Python 3; input() is the equivalent.
    input()
|
||||
|
||||
|
||||
# code to handle timeouts
|
||||
#
|
||||
# FIXME
|
||||
# NOTE: TimeoutFuncton is NOT reentrant. Later timeouts will silently
|
||||
# disable earlier timeouts. Could be solved by maintaining a global list
|
||||
# of active time outs. Currently, questions which have test cases calling
|
||||
# this have all student code so wrapped.
|
||||
#
|
||||
import signal
|
||||
import time
|
||||
|
||||
|
||||
class TimeoutFunctionException(Exception):
    """Exception to raise on a timeout"""
    # Raised by TimeoutFunction when the wrapped call exceeds its budget.
    pass
|
||||
|
||||
|
||||
class TimeoutFunction:
    """Callable wrapper that enforces a wall-clock time limit on a function.

    On platforms providing SIGALRM the limit is enforced pre-emptively;
    elsewhere the elapsed time is only checked after the call returns.
    """

    def __init__(self, function, timeout):
        # timeout: time budget in seconds.
        # NOTE(review): signal.alarm() requires an int -- confirm callers
        # never pass a float timeout on the SIGALRM path.
        self.timeout = timeout
        self.function = function

    def handle_timeout(self, signum, frame):
        # SIGALRM handler; also called directly on the non-signal fallback.
        raise TimeoutFunctionException()

    def __call__(self, *args, **keyArgs):
        # If we have SIGALRM signal, use it to cause an exception if and
        # when this function runs too long. Otherwise check the time taken
        # after the method has returned, and throw an exception then.
        if hasattr(signal, 'SIGALRM'):
            old = signal.signal(signal.SIGALRM, self.handle_timeout)
            signal.alarm(self.timeout)
            try:
                result = self.function(*args, **keyArgs)
            finally:
                # Restore the previous handler and cancel the pending alarm
                # even if the wrapped call raised.
                signal.signal(signal.SIGALRM, old)
                signal.alarm(0)
        else:
            startTime = time.time()
            result = self.function(*args, **keyArgs)
            timeElapsed = time.time() - startTime
            if timeElapsed >= self.timeout:
                self.handle_timeout(None, None)
        return result
|
||||
|
||||
|
||||
# Module-level state used by mutePrint()/unmutePrint() below to swap
# sys.stdout for a silent sink and later restore it. _MUTED guards against
# double-muting / double-restoring.
_ORIGINAL_STDOUT = None
_ORIGINAL_STDERR = None
_MUTED = False
|
||||
|
||||
|
||||
class WritableNull:
    """A file-like sink that silently discards everything written to it."""

    def write(self, string):
        # Intentionally a no-op: installed as sys.stdout by mutePrint().
        pass
|
||||
|
||||
|
||||
def mutePrint():
    """Silence print output by swapping sys.stdout for a no-op sink."""
    global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
    if not _MUTED:
        _MUTED = True
        _ORIGINAL_STDOUT = sys.stdout
        # _ORIGINAL_STDERR = sys.stderr  (stderr muting disabled upstream)
        sys.stdout = WritableNull()
        # sys.stderr = WritableNull()
|
||||
|
||||
|
||||
def unmutePrint():
    """Restore the stdout stream that mutePrint() saved."""
    global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
    if _MUTED:
        _MUTED = False
        sys.stdout = _ORIGINAL_STDOUT
        # sys.stderr = _ORIGINAL_STDERR
|
Reference in New Issue
Block a user