diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..a5c0f87
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+multiagent1/
\ No newline at end of file
diff --git a/docs/A*.md b/docs/A*.md
new file mode 100644
index 0000000..e8b8f75
--- /dev/null
+++ b/docs/A*.md
@@ -0,0 +1,103 @@
+# Heuristic Functions and the A* Search Algorithm
+
+## Introduction
+
+In artificial intelligence and path planning, heuristic functions and the A* search algorithm are two important concepts. A heuristic function estimates the cost from the current state to the goal state, and the A* search algorithm uses these estimates to find an optimal path.
+
+## Heuristic Functions
+
+### What is a heuristic function?
+
+A heuristic function estimates the minimum cost from the current state to the goal state. Its purpose is to speed up search algorithms so that they find solutions more efficiently.
+
+### Properties of heuristic functions
+
+1. **Admissibility**:
+   - A heuristic function is admissible if it never overestimates the actual minimum cost from a node to the goal.
+   - Formal definition: for every node \(n\), the heuristic \(h(n)\) must satisfy \(h(n) \leq h^*(n)\), where \(h^*(n)\) is the actual cost from node \(n\) to the goal.
+
+2. **Consistency**:
+   - A consistent heuristic is also called a monotone heuristic. A heuristic \(h\) is consistent if, for every node \(n\) and every successor \(m\) of \(n\), it satisfies \(h(n) \leq c(n, m) + h(m)\), where \(c(n, m)\) is the actual cost of the step from node \(n\) to node \(m\).
+
+### Examples of heuristic functions
+
+1. **Manhattan distance**:
+   - In grid-based path planning, the Manhattan distance is the total distance between two points measured along the grid axes.
+   - Formula: \(h(n) = |x_1 - x_2| + |y_1 - y_2|\).
+
+2. **Euclidean distance**:
+   - The Euclidean distance is the straight-line distance between two points.
+   - Formula: \(h(n) = \sqrt{(x_1 - x_2)^2 + (y_1 - y_2)^2}\).
+
+## The A* Search Algorithm
+
+### What is A* search?
+
+A* search is a graph-search algorithm that combines the strengths of Dijkstra's algorithm and greedy best-first search. A* uses a heuristic function to guide the direction of the search, finding an optimal path from a start node to a goal node.
+
+### How A* search works
+
+1. **Initialization**:
+   - Add the start node to a priority queue with an initial cost of 0.
+
+2. **Search loop**:
+   - Remove the node with the smallest total cost from the priority queue and make it the current node.
+   - If the current node is the goal, the search is finished.
+   - Otherwise, expand all neighbors of the current node and update their costs and priorities.
+   - Repeat until the goal is found or the priority queue is empty.
+
+3. **Cost function**:
+   - A* ranks each node with the cost function \(f(n) = g(n) + h(n)\).
+   - Here \(g(n)\) is the actual cost from the start node to node \(n\), and \(h(n)\) is the heuristic estimate of the cost from node \(n\) to the goal.
+
+### A* pseudocode
+
+```pseudo
+function A*(start, goal)
+    openSet := {start}
+    cameFrom := empty map
+
+    gScore := map with default value of Infinity
+    gScore[start] := 0
+
+    fScore := map with default value of Infinity
+    fScore[start] := heuristic(start, goal)
+
+    while openSet is not empty
+        current := the node in openSet with the lowest fScore value
+        if current == goal
+            return reconstruct_path(cameFrom, current)
+
+        openSet.remove(current)
+        for each neighbor of current
+            // d(current, neighbor) is the weight of the edge from current to neighbor
+            tentative_gScore := gScore[current] + d(current, neighbor)
+            if tentative_gScore < gScore[neighbor]
+                cameFrom[neighbor] := current
+                gScore[neighbor] := tentative_gScore
+                fScore[neighbor] := gScore[neighbor] + heuristic(neighbor, goal)
+                if neighbor not in openSet
+                    openSet.add(neighbor)
+
+    return failure
+
+function reconstruct_path(cameFrom, current)
+    total_path := {current}
+    while current in cameFrom
+        current := cameFrom[current]
+        total_path.prepend(current)
+    return total_path
+```
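+
+### A runnable Python example
+
+To make the pseudocode concrete, here is a minimal runnable sketch of A* on a 4-connected grid with unit step costs, using the Manhattan distance as the heuristic. The grid encoding (0 = free cell, 1 = wall) and the names `astar` and `manhattan` are illustrative choices for this document, not a fixed API; for simplicity the open set is a heap that may hold stale entries, which the `tentative_g` check renders harmless.
+
+```python
+import heapq
+
+
+def manhattan(a, b):
+    # Admissible (and consistent) on a 4-connected grid with unit step cost.
+    return abs(a[0] - b[0]) + abs(a[1] - b[1])
+
+
+def astar(grid, start, goal):
+    """Return a shortest path from start to goal as a list of cells, or None."""
+    open_heap = [(manhattan(start, goal), start)]  # entries are (fScore, cell)
+    came_from = {}
+    g_score = {start: 0}
+    while open_heap:
+        _, current = heapq.heappop(open_heap)
+        if current == goal:
+            path = [current]
+            while current in came_from:  # walk parent pointers back to start
+                current = came_from[current]
+                path.append(current)
+            return path[::-1]
+        x, y = current
+        for nxt in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
+            nx, ny = nxt
+            if 0 <= nx < len(grid) and 0 <= ny < len(grid[0]) and grid[nx][ny] == 0:
+                tentative_g = g_score[current] + 1  # d(current, neighbor) = 1
+                if tentative_g < g_score.get(nxt, float('inf')):
+                    came_from[nxt] = current
+                    g_score[nxt] = tentative_g
+                    heapq.heappush(open_heap, (tentative_g + manhattan(nxt, goal), nxt))
+    return None  # open set exhausted without reaching the goal
+
+
+if __name__ == '__main__':
+    grid = [[0, 0, 0],
+            [1, 1, 0],
+            [0, 0, 0]]
+    # Prints the shortest route around the wall row:
+    # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]
+    print(astar(grid, (0, 0), (2, 0)))
+```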
+
+### Applications of A* search
+
+- **Video games**: character path planning.
+- **Route planning**: map navigation, robot path planning, and similar problems.
+- **Resource planning**: logistics and supply-chain management.
+- **Language analysis**: for example, syntactic parsing.
+- **Machine translation and speech recognition**: finding optimal matches and paths.
+
+## Conclusion
+
+Heuristic functions and the A* search algorithm are essential tools for solving complex path-planning and search problems. A well-designed heuristic can significantly improve a search algorithm's efficiency, and by combining the actual path cost with a heuristic estimate, A* is a powerful method for finding optimal paths.
+
diff --git a/docs/minmax.md b/docs/minmax.md
new file mode 100644
index 0000000..e3105c8
--- /dev/null
+++ b/docs/minmax.md
@@ -0,0 +1,140 @@
+# The Minimax Search Algorithm
+
+## Introduction
+
+Minimax is a decision algorithm for two-player games such as chess, tic-tac-toe, and checkers. It finds a player's best move under the assumption that the opponent also plays optimally. The goal of the minimax algorithm is to maximize the player's score while minimizing the possible loss (hence the name "minimax").
+
+## Basic Concepts
+
+### The game tree
+
+A game tree represents all possible moves in a game. Starting from the current state, each node represents a game state and each edge represents a move.
+
+### The maximizing and minimizing players
+
+- **Maximizing player (Max)**: tries to obtain the highest possible score.
+- **Minimizing player (Min)**: tries to minimize the maximizing player's score.
+
+## How Minimax Works
+
+1. **Generate the game tree**: starting from the current game state, generate all possible future states down to a predetermined depth.
+2. **Evaluate the leaf nodes**: use an evaluation function to assign a score to each leaf node (a terminal state, or a state at the maximum depth).
+3. **Back up the scores**:
+   - At a Max node, take the highest score among the children.
+   - At a Min node, take the lowest score among the children.
+4. **Choose the best move**: at the root node (the current game state), choose the move that leads to the highest score for Max.
+
+## Example
+
+Consider tic-tac-toe. Suppose it is Max's turn (playing "X") and the current board is:
+
+```
+ X | O | X
+-----------
+ O | X |
+-----------
+   |   | O
+```
+
+### Step 1: Generate the game tree
+
+Generate all of Max's possible moves by placing an "X" in each empty square:
+
+```
+ X | O | X      X | O | X      X | O | X
+-----------    -----------    -----------
+ O | X | X      O | X |        O | X |
+-----------    -----------    -----------
+   |   | O      X |   | O        | X | O
+```
+
+### Step 2: Evaluate the leaf nodes
+
+Assign scores to the leaf nodes. Suppose:
+- a Max win scores +10,
+- a Min win scores -10,
+- a draw scores 0.
+
+The second move wins immediately for Max (the "X" in the bottom-left corner completes a diagonal); with best play by Min, the other two moves each end in a draw:
+
+```
+ X | O | X      X | O | X      X | O | X
+-----------    -----------    -----------
+ O | X | X      O | X |        O | X |
+-----------    -----------    -----------
+   |   | O      X |   | O        | X | O
+
+ Score: 0       Score: 10      Score: 0
+```
+
+### Step 3: Back up the scores
+
+Since the root is a Max node, take the highest score among its children:
+
+```
+ Max
+  |
+  V
+ 10
+```
+
+### Step 4: Choose the best move
+
+Max chooses the move that leads to the score of 10 (the "X" in the bottom-left corner).
+
+## Pseudocode
+
+Here is simple pseudocode for the minimax algorithm:
+
+```pseudo
+function minimax(node, depth, maximizingPlayer)
+    if depth == 0 or node is a terminal node
+        return evaluate(node)
+
+    if maximizingPlayer
+        maxEval = -∞
+        for each child of node
+            eval = minimax(child, depth - 1, false)
+            maxEval = max(maxEval, eval)
+        return maxEval
+    else
+        minEval = +∞
+        for each child of node
+            eval = minimax(child, depth - 1, true)
+            minEval = min(minEval, eval)
+        return minEval
+```
+
+## Alpha-Beta Pruning
+
+Alpha-beta pruning is an optimization of the minimax algorithm. It prunes branches that cannot influence the final decision, reducing the number of nodes that must be evaluated.
+
+### Pseudocode with alpha-beta pruning
+
+```pseudo
+function alphabeta(node, depth, α, β, maximizingPlayer)
+    if depth == 0 or node is a terminal node
+        return evaluate(node)
+
+    if maximizingPlayer
+        maxEval = -∞
+        for each child of node
+            eval = alphabeta(child, depth - 1, α, β, false)
+            maxEval = max(maxEval, eval)
+            α = max(α, eval)
+            if β <= α
+                break
+        return maxEval
+    else
+        minEval = +∞
+        for each child of node
+            eval = alphabeta(child, depth - 1, α, β, true)
+            minEval = min(minEval, eval)
+            β = min(β, eval)
+            if β <= α
+                break
+        return minEval
+```
+
+## Conclusion
+
+Minimax is a powerful algorithm for making optimal decisions in two-player games. By considering all possible moves and their outcomes, it guarantees the best move under the assumption that the opponent also plays optimally. Alpha-beta pruning further improves minimax's efficiency by reducing the number of nodes that must be evaluated.
+
diff --git a/multiagent/VERSION b/multiagent/VERSION
new file mode 100644
index 0000000..259e855
--- /dev/null
+++ b/multiagent/VERSION
@@ -0,0 +1 @@
+v1.004
diff --git a/multiagent/autograder.py b/multiagent/autograder.py
new file mode 100644
index 0000000..03a048a
--- /dev/null
+++ b/multiagent/autograder.py
@@ -0,0 +1,362 @@
+# autograder.py
+# -------------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+ + +# imports from python standard library +import grading +import importlib.util +import optparse +import os +import pprint +import re +import sys +import projectParams +import random +random.seed(0) +try: + from pacman import GameState +except: + pass + +# register arguments and set default values +def readCommand(argv): + parser = optparse.OptionParser( + description='Run public tests on student code') + parser.set_defaults(generateSolutions=False, edxOutput=False, gsOutput=False, + muteOutput=False, printTestCase=False, noGraphics=False) + parser.add_option('--test-directory', + dest='testRoot', + default='test_cases', + help='Root test directory which contains subdirectories corresponding to each question') + parser.add_option('--student-code', + dest='studentCode', + default=projectParams.STUDENT_CODE_DEFAULT, + help='comma separated list of student code files') + parser.add_option('--code-directory', + dest='codeRoot', + default="", + help='Root directory containing the student and testClass code') + parser.add_option('--test-case-code', + dest='testCaseCode', + default=projectParams.PROJECT_TEST_CLASSES, + help='class containing testClass classes for this project') + parser.add_option('--generate-solutions', + dest='generateSolutions', + action='store_true', + help='Write solutions generated to .solution file') + parser.add_option('--edx-output', + dest='edxOutput', + action='store_true', + help='Generate edX output files') + parser.add_option('--gradescope-output', + dest='gsOutput', + action='store_true', + help='Generate GradeScope output files') + parser.add_option('--mute', + dest='muteOutput', + action='store_true', + help='Mute output from executing tests') + parser.add_option('--print-tests', '-p', + dest='printTestCase', + action='store_true', + help='Print each test case before running them.') + parser.add_option('--test', '-t', + dest='runTest', + default=None, + help='Run one particular test. Relative to test root.') + parser.add_option('--question', '-q', + dest='gradeQuestion', + default=None, + help='Grade one particular question.') + parser.add_option('--no-graphics', + dest='noGraphics', + action='store_true', + help='No graphics display for pacman games.') + (options, args) = parser.parse_args(argv) + return options + + +# confirm we should author solution files +def confirmGenerate(): + print('WARNING: this action will overwrite any solution files.') + print('Are you sure you want to proceed? (yes/no)') + while True: + ans = sys.stdin.readline().strip() + if ans == 'yes': + break + elif ans == 'no': + sys.exit(0) + else: + print('please answer either "yes" or "no"') + + +# TODO: Fix this so that it tracebacks work correctly +# Looking at source of the traceback module, presuming it works +# the same as the intepreters, it uses co_filename. This is, +# however, a readonly attribute. +def setModuleName(module, filename): + functionType = type(confirmGenerate) + classType = type(optparse.Option) + + for i in dir(module): + o = getattr(module, i) + if hasattr(o, '__file__'): + continue + + if type(o) == functionType: + setattr(o, '__file__', filename) + elif type(o) == classType: + setattr(o, '__file__', filename) + # TODO: assign member __file__'s? 
+ # print i, type(o) + + +#from cStringIO import StringIO + +def loadModuleString(moduleSource): + # Below broken, imp doesn't believe its being passed a file: + # ValueError: load_module arg#2 should be a file or None + # + #f = StringIO(moduleCodeDict[k]) + #tmp = imp.load_module(k, f, k, (".py", "r", imp.PY_SOURCE)) + tmp = imp.new_module(k) + exec(moduleCodeDict[k], tmp.__dict__) + setModuleName(tmp, k) + return tmp + + +def loadModuleFile(moduleName, filePath): + # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly + spec = importlib.util.spec_from_file_location(moduleName, filePath) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +def readFile(path, root=""): + "Read file from disk at specified path and return as string" + with open(os.path.join(root, path), 'r') as handle: + return handle.read() + + +####################################################################### +# Error Hint Map +####################################################################### + +# TODO: use these +ERROR_HINT_MAP = { + 'q1': { + "": """ + We noticed that your project threw an IndexError on q1. + While many things may cause this, it may have been from + assuming a certain number of successors from a state space + or assuming a certain number of actions available from a given + state. Try making your code more general (no hardcoded indices) + and submit again! + """ + }, + 'q3': { + "": """ + We noticed that your project threw an AttributeError on q3. + While many things may cause this, it may have been from assuming + a certain size or structure to the state space. For example, if you have + a line of code assuming that the state is (x, y) and we run your code + on a state space with (x, y, z), this error could be thrown. Try + making your code more general and submit again! 
+ + """ + } +} + + +def splitStrings(d): + d2 = dict(d) + for k in d: + if k[0:2] == "__": + del d2[k] + continue + if d2[k].find("\n") >= 0: + d2[k] = d2[k].split("\n") + return d2 + + +def printTest(testDict, solutionDict): + pp = pprint.PrettyPrinter(indent=4) + print("Test case:") + for line in testDict["__raw_lines__"]: + print(" |", line) + print("Solution:") + for line in solutionDict["__raw_lines__"]: + print(" |", line) + + +def runTest(testName, moduleDict, printTestCase=False, display=None): + import testParser + import testClasses + for module in moduleDict: + setattr(sys.modules[__name__], module, moduleDict[module]) + + testDict = testParser.TestParser(testName + ".test").parse() + solutionDict = testParser.TestParser(testName + ".solution").parse() + test_out_file = os.path.join('%s.test_output' % testName) + testDict['test_out_file'] = test_out_file + testClass = getattr(projectTestClasses, testDict['class']) + + questionClass = getattr(testClasses, 'Question') + question = questionClass({'max_points': 0}, display) + testCase = testClass(question, testDict) + + if printTestCase: + printTest(testDict, solutionDict) + + # This is a fragile hack to create a stub grades object + grades = grading.Grades(projectParams.PROJECT_NAME, [(None, 0)]) + testCase.execute(grades, moduleDict, solutionDict) + + +# returns all the tests you need to run in order to run question +def getDepends(testParser, testRoot, question): + allDeps = [question] + questionDict = testParser.TestParser( + os.path.join(testRoot, question, 'CONFIG')).parse() + if 'depends' in questionDict: + depends = questionDict['depends'].split() + for d in depends: + # run dependencies first + allDeps = getDepends(testParser, testRoot, d) + allDeps + return allDeps + +# get list of questions to grade +def getTestSubdirs(testParser, testRoot, questionToGrade): + problemDict = testParser.TestParser( + os.path.join(testRoot, 'CONFIG')).parse() + if questionToGrade != None: + questions = getDepends(testParser, testRoot, questionToGrade) + if len(questions) > 1: + print('Note: due to dependencies, the following tests will be run: %s' % + ' '.join(questions)) + return questions + if 'order' in problemDict: + return problemDict['order'].split() + return sorted(os.listdir(testRoot)) + + +# evaluate student code +def evaluate(generateSolutions, testRoot, moduleDict, exceptionMap=ERROR_HINT_MAP, + edxOutput=False, muteOutput=False, gsOutput=False, + printTestCase=False, questionToGrade=None, display=None): + # imports of testbench code. 
note that the testClasses import must follow + # the import of student code due to dependencies + import testParser + import testClasses + for module in moduleDict: + setattr(sys.modules[__name__], module, moduleDict[module]) + + questions = [] + questionDicts = {} + test_subdirs = getTestSubdirs(testParser, testRoot, questionToGrade) + for q in test_subdirs: + subdir_path = os.path.join(testRoot, q) + if not os.path.isdir(subdir_path) or q[0] == '.': + continue + + # create a question object + questionDict = testParser.TestParser( + os.path.join(subdir_path, 'CONFIG')).parse() + questionClass = getattr(testClasses, questionDict['class']) + question = questionClass(questionDict, display) + questionDicts[q] = questionDict + + # load test cases into question + tests = [t for t in os.listdir( + subdir_path) if re.match(r'[^#~.].*\.test\Z', t)] + tests = [re.match(r'(.*)\.test\Z', t).group(1) for t in tests] + for t in sorted(tests): + test_file = os.path.join(subdir_path, '%s.test' % t) + solution_file = os.path.join(subdir_path, '%s.solution' % t) + test_out_file = os.path.join(subdir_path, '%s.test_output' % t) + testDict = testParser.TestParser(test_file).parse() + if testDict.get("disabled", "false").lower() == "true": + continue + testDict['test_out_file'] = test_out_file + testClass = getattr(projectTestClasses, testDict['class']) + testCase = testClass(question, testDict) + + def makefun(testCase, solution_file): + if generateSolutions: + # write solution file to disk + return lambda grades: testCase.writeSolution(moduleDict, solution_file) + else: + # read in solution dictionary and pass as an argument + testDict = testParser.TestParser(test_file).parse() + solutionDict = testParser.TestParser(solution_file).parse() + if printTestCase: + return lambda grades: printTest(testDict, solutionDict) or testCase.execute(grades, moduleDict, solutionDict) + else: + return lambda grades: testCase.execute(grades, moduleDict, solutionDict) + question.addTestCase(testCase, makefun(testCase, solution_file)) + + # Note extra function is necessary for scoping reasons + def makefun(question): + return lambda grades: question.execute(grades) + setattr(sys.modules[__name__], q, makefun(question)) + questions.append((q, question.getMaxPoints())) + + grades = grading.Grades(projectParams.PROJECT_NAME, questions, + gsOutput=gsOutput, edxOutput=edxOutput, muteOutput=muteOutput) + if questionToGrade == None: + for q in questionDicts: + for prereq in questionDicts[q].get('depends', '').split(): + grades.addPrereq(q, prereq) + + grades.grade(sys.modules[__name__], bonusPic=projectParams.BONUS_PIC) + return grades.points + + +def getDisplay(graphicsByDefault, options=None): + graphics = graphicsByDefault + if options is not None and options.noGraphics: + graphics = False + if graphics: + try: + import graphicsDisplay + return graphicsDisplay.PacmanGraphics(1, frameTime=.05) + except ImportError: + pass + import textDisplay + return textDisplay.NullGraphics() + + +if __name__ == '__main__': + options = readCommand(sys.argv) + if options.generateSolutions: + confirmGenerate() + codePaths = options.studentCode.split(',') + + moduleDict = {} + for cp in codePaths: + moduleName = re.match(r'.*?([^/]*)\.py', cp).group(1) + moduleDict[moduleName] = loadModuleFile( + moduleName, os.path.join(options.codeRoot, cp)) + moduleName = re.match(r'.*?([^/]*)\.py', options.testCaseCode).group(1) + moduleDict['projectTestClasses'] = loadModuleFile( + moduleName, os.path.join(options.codeRoot, options.testCaseCode)) + + if 
options.runTest != None: + runTest(options.runTest, moduleDict, printTestCase=options.printTestCase, + display=getDisplay(True, options)) + else: + evaluate(options.generateSolutions, options.testRoot, moduleDict, + gsOutput=options.gsOutput, + edxOutput=options.edxOutput, muteOutput=options.muteOutput, printTestCase=options.printTestCase, + questionToGrade=options.gradeQuestion, display=getDisplay(options.gradeQuestion != None, options)) diff --git a/multiagent/game.py b/multiagent/game.py new file mode 100644 index 0000000..00adb7a --- /dev/null +++ b/multiagent/game.py @@ -0,0 +1,778 @@ +# game.py +# ------- +# Licensing Information: You are free to use or extend these projects for +# educational purposes provided that (1) you do not distribute or publish +# solutions, (2) you retain this notice, and (3) you provide clear +# attribution to UC Berkeley, including a link to http://ai.berkeley.edu. +# +# Attribution Information: The Pacman AI projects were developed at UC Berkeley. +# The core projects and autograders were primarily created by John DeNero +# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). +# Student side autograding was added by Brad Miller, Nick Hay, and +# Pieter Abbeel (pabbeel@cs.berkeley.edu). + + +# game.py +# ------- +# Licensing Information: Please do not distribute or publish solutions to this +# project. You are free to use and extend these projects for educational +# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by +# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). +# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html + +from util import * +import time +import os +import traceback +import sys + +####################### +# Parts worth reading # +####################### + + +class Agent: + """ + An agent must define a getAction method, but may also define the + following methods which will be called if they exist: + + def registerInitialState(self, state): # inspects the starting state + """ + + def __init__(self, index=0): + self.index = index + + def getAction(self, state): + """ + The Agent will receive a GameState (from either {pacman, capture, sonar}.py) and + must return an action from Directions.{North, South, East, West, Stop} + """ + raiseNotDefined() + + +class Directions: + NORTH = 'North' + SOUTH = 'South' + EAST = 'East' + WEST = 'West' + STOP = 'Stop' + + LEFT = {NORTH: WEST, + SOUTH: EAST, + EAST: NORTH, + WEST: SOUTH, + STOP: STOP} + + RIGHT = dict([(y, x) for x, y in list(LEFT.items())]) + + REVERSE = {NORTH: SOUTH, + SOUTH: NORTH, + EAST: WEST, + WEST: EAST, + STOP: STOP} + + +class Configuration: + """ + A Configuration holds the (x,y) coordinate of a character, along with its + traveling direction. + + The convention for positions, like a graph, is that (0,0) is the lower left corner, x increases + horizontally and y increases vertically. Therefore, north is the direction of increasing y, or (0,1). 
+ """ + + def __init__(self, pos, direction): + self.pos = pos + self.direction = direction + + def getPosition(self): + return (self.pos) + + def getDirection(self): + return self.direction + + def isInteger(self): + x, y = self.pos + return x == int(x) and y == int(y) + + def __eq__(self, other): + if other == None: + return False + return (self.pos == other.pos and self.direction == other.direction) + + def __hash__(self): + x = hash(self.pos) + y = hash(self.direction) + return hash(x + 13 * y) + + def __str__(self): + return "(x,y)="+str(self.pos)+", "+str(self.direction) + + def generateSuccessor(self, vector): + """ + Generates a new configuration reached by translating the current + configuration by the action vector. This is a low-level call and does + not attempt to respect the legality of the movement. + + Actions are movement vectors. + """ + x, y = self.pos + dx, dy = vector + direction = Actions.vectorToDirection(vector) + if direction == Directions.STOP: + direction = self.direction # There is no stop direction + return Configuration((x + dx, y+dy), direction) + + +class AgentState: + """ + AgentStates hold the state of an agent (configuration, speed, scared, etc). + """ + + def __init__(self, startConfiguration, isPacman): + self.start = startConfiguration + self.configuration = startConfiguration + self.isPacman = isPacman + self.scaredTimer = 0 + # state below potentially used for contest only + self.numCarrying = 0 + self.numReturned = 0 + + def __str__(self): + if self.isPacman: + return "Pacman: " + str(self.configuration) + else: + return "Ghost: " + str(self.configuration) + + def __eq__(self, other): + if other == None: + return False + return self.configuration == other.configuration and self.scaredTimer == other.scaredTimer + + def __hash__(self): + return hash(hash(self.configuration) + 13 * hash(self.scaredTimer)) + + def copy(self): + state = AgentState(self.start, self.isPacman) + state.configuration = self.configuration + state.scaredTimer = self.scaredTimer + state.numCarrying = self.numCarrying + state.numReturned = self.numReturned + return state + + def getPosition(self): + if self.configuration == None: + return None + return self.configuration.getPosition() + + def getDirection(self): + return self.configuration.getDirection() + + +class Grid: + """ + A 2-dimensional array of objects backed by a list of lists. Data is accessed + via grid[x][y] where (x,y) are positions on a Pacman map with x horizontal, + y vertical and the origin (0,0) in the bottom left corner. + + The __str__ method constructs an output that is oriented like a pacman board. 
+ """ + + def __init__(self, width, height, initialValue=False, bitRepresentation=None): + if initialValue not in [False, True]: + raise Exception('Grids can only contain booleans') + self.CELLS_PER_INT = 30 + + self.width = width + self.height = height + self.data = [[initialValue for y in range( + height)] for x in range(width)] + if bitRepresentation: + self._unpackBits(bitRepresentation) + + def __getitem__(self, i): + return self.data[i] + + def __setitem__(self, key, item): + self.data[key] = item + + def __str__(self): + out = [[str(self.data[x][y])[0] for x in range(self.width)] + for y in range(self.height)] + out.reverse() + return '\n'.join([''.join(x) for x in out]) + + def __eq__(self, other): + if other == None: + return False + return self.data == other.data + + def __hash__(self): + # return hash(str(self)) + base = 1 + h = 0 + for l in self.data: + for i in l: + if i: + h += base + base *= 2 + return hash(h) + + def copy(self): + g = Grid(self.width, self.height) + g.data = [x[:] for x in self.data] + return g + + def deepCopy(self): + return self.copy() + + def shallowCopy(self): + g = Grid(self.width, self.height) + g.data = self.data + return g + + def count(self, item=True): + return sum([x.count(item) for x in self.data]) + + def asList(self, key=True): + list = [] + for x in range(self.width): + for y in range(self.height): + if self[x][y] == key: + list.append((x, y)) + return list + + def packBits(self): + """ + Returns an efficient int list representation + + (width, height, bitPackedInts...) + """ + bits = [self.width, self.height] + currentInt = 0 + for i in range(self.height * self.width): + bit = self.CELLS_PER_INT - (i % self.CELLS_PER_INT) - 1 + x, y = self._cellIndexToPosition(i) + if self[x][y]: + currentInt += 2 ** bit + if (i + 1) % self.CELLS_PER_INT == 0: + bits.append(currentInt) + currentInt = 0 + bits.append(currentInt) + return tuple(bits) + + def _cellIndexToPosition(self, index): + x = index / self.height + y = index % self.height + return x, y + + def _unpackBits(self, bits): + """ + Fills in data from a bit-level representation + """ + cell = 0 + for packed in bits: + for bit in self._unpackInt(packed, self.CELLS_PER_INT): + if cell == self.width * self.height: + break + x, y = self._cellIndexToPosition(cell) + self[x][y] = bit + cell += 1 + + def _unpackInt(self, packed, size): + bools = [] + if packed < 0: + raise ValueError("must be a positive integer") + for i in range(size): + n = 2 ** (self.CELLS_PER_INT - i - 1) + if packed >= n: + bools.append(True) + packed -= n + else: + bools.append(False) + return bools + + +def reconstituteGrid(bitRep): + if type(bitRep) is not type((1, 2)): + return bitRep + width, height = bitRep[:2] + return Grid(width, height, bitRepresentation=bitRep[2:]) + +#################################### +# Parts you shouldn't have to read # +#################################### + + +class Actions: + """ + A collection of static methods for manipulating move actions. 
+ """ + # Directions + _directions = {Directions.WEST: (-1, 0), + Directions.STOP: (0, 0), + Directions.EAST: (1, 0), + Directions.NORTH: (0, 1), + Directions.SOUTH: (0, -1)} + + _directionsAsList = [('West', (-1, 0)), ('Stop', (0, 0)), ('East', (1, 0)), ('North', (0, 1)), ('South', (0, -1))] + + TOLERANCE = .001 + + def reverseDirection(action): + if action == Directions.NORTH: + return Directions.SOUTH + if action == Directions.SOUTH: + return Directions.NORTH + if action == Directions.EAST: + return Directions.WEST + if action == Directions.WEST: + return Directions.EAST + return action + reverseDirection = staticmethod(reverseDirection) + + def vectorToDirection(vector): + dx, dy = vector + if dy > 0: + return Directions.NORTH + if dy < 0: + return Directions.SOUTH + if dx < 0: + return Directions.WEST + if dx > 0: + return Directions.EAST + return Directions.STOP + vectorToDirection = staticmethod(vectorToDirection) + + def directionToVector(direction, speed=1.0): + dx, dy = Actions._directions[direction] + return (dx * speed, dy * speed) + directionToVector = staticmethod(directionToVector) + + def getPossibleActions(config, walls): + possible = [] + x, y = config.pos + x_int, y_int = int(x + 0.5), int(y + 0.5) + + # In between grid points, all agents must continue straight + if (abs(x - x_int) + abs(y - y_int) > Actions.TOLERANCE): + return [config.getDirection()] + + for dir, vec in Actions._directionsAsList: + dx, dy = vec + next_y = y_int + dy + next_x = x_int + dx + if not walls[next_x][next_y]: + possible.append(dir) + + return possible + + getPossibleActions = staticmethod(getPossibleActions) + + def getLegalNeighbors(position, walls): + x, y = position + x_int, y_int = int(x + 0.5), int(y + 0.5) + neighbors = [] + for dir, vec in Actions._directionsAsList: + dx, dy = vec + next_x = x_int + dx + if next_x < 0 or next_x == walls.width: + continue + next_y = y_int + dy + if next_y < 0 or next_y == walls.height: + continue + if not walls[next_x][next_y]: + neighbors.append((next_x, next_y)) + return neighbors + getLegalNeighbors = staticmethod(getLegalNeighbors) + + def getSuccessor(position, action): + dx, dy = Actions.directionToVector(action) + x, y = position + return (x + dx, y + dy) + getSuccessor = staticmethod(getSuccessor) + + +class GameStateData: + + def __init__(self, prevState=None): + """ + Generates a new data packet by copying information from its predecessor. + """ + if prevState != None: + self.food = prevState.food.shallowCopy() + self.capsules = prevState.capsules[:] + self.agentStates = self.copyAgentStates(prevState.agentStates) + self.layout = prevState.layout + self._eaten = prevState._eaten + self.score = prevState.score + + self._foodEaten = None + self._foodAdded = None + self._capsuleEaten = None + self._agentMoved = None + self._lose = False + self._win = False + self.scoreChange = 0 + + def deepCopy(self): + state = GameStateData(self) + state.food = self.food.deepCopy() + state.layout = self.layout.deepCopy() + state._agentMoved = self._agentMoved + state._foodEaten = self._foodEaten + state._foodAdded = self._foodAdded + state._capsuleEaten = self._capsuleEaten + return state + + def copyAgentStates(self, agentStates): + copiedStates = [] + for agentState in agentStates: + copiedStates.append(agentState.copy()) + return copiedStates + + def __eq__(self, other): + """ + Allows two states to be compared. 
+ """ + if other == None: + return False + # TODO Check for type of other + if not self.agentStates == other.agentStates: + return False + if not self.food == other.food: + return False + if not self.capsules == other.capsules: + return False + if not self.score == other.score: + return False + return True + + def __hash__(self): + """ + Allows states to be keys of dictionaries. + """ + for i, state in enumerate(self.agentStates): + try: + int(hash(state)) + except TypeError as e: + print(e) + # hash(state) + return int((hash(tuple(self.agentStates)) + 13*hash(self.food) + 113 * hash(tuple(self.capsules)) + 7 * hash(self.score)) % 1048575) + + def __str__(self): + width, height = self.layout.width, self.layout.height + map = Grid(width, height) + if type(self.food) == type((1, 2)): + self.food = reconstituteGrid(self.food) + for x in range(width): + for y in range(height): + food, walls = self.food, self.layout.walls + map[x][y] = self._foodWallStr(food[x][y], walls[x][y]) + + for agentState in self.agentStates: + if agentState == None: + continue + if agentState.configuration == None: + continue + x, y = [int(i) for i in nearestPoint(agentState.configuration.pos)] + agent_dir = agentState.configuration.direction + if agentState.isPacman: + map[x][y] = self._pacStr(agent_dir) + else: + map[x][y] = self._ghostStr(agent_dir) + + for x, y in self.capsules: + map[x][y] = 'o' + + return str(map) + ("\nScore: %d\n" % self.score) + + def _foodWallStr(self, hasFood, hasWall): + if hasFood: + return '.' + elif hasWall: + return '%' + else: + return ' ' + + def _pacStr(self, dir): + if dir == Directions.NORTH: + return 'v' + if dir == Directions.SOUTH: + return '^' + if dir == Directions.WEST: + return '>' + return '<' + + def _ghostStr(self, dir): + return 'G' + if dir == Directions.NORTH: + return 'M' + if dir == Directions.SOUTH: + return 'W' + if dir == Directions.WEST: + return '3' + return 'E' + + def initialize(self, layout, numGhostAgents): + """ + Creates an initial game state from a layout array (see layout.py). + """ + self.food = layout.food.copy() + #self.capsules = [] + self.capsules = layout.capsules[:] + self.layout = layout + self.score = 0 + self.scoreChange = 0 + + self.agentStates = [] + numGhosts = 0 + for isPacman, pos in layout.agentPositions: + if not isPacman: + if numGhosts == numGhostAgents: + continue # Max ghosts reached already + else: + numGhosts += 1 + self.agentStates.append(AgentState( + Configuration(pos, Directions.STOP), isPacman)) + self._eaten = [False for a in self.agentStates] + + +try: + import boinc + _BOINC_ENABLED = True +except: + _BOINC_ENABLED = False + + +class Game: + """ + The Game manages the control flow, soliciting actions from agents. 
+ """ + + def __init__(self, agents, display, rules, startingIndex=0, muteAgents=False, catchExceptions=False): + self.agentCrashed = False + self.agents = agents + self.display = display + self.rules = rules + self.startingIndex = startingIndex + self.gameOver = False + self.muteAgents = muteAgents + self.catchExceptions = catchExceptions + self.moveHistory = [] + self.totalAgentTimes = [0 for agent in agents] + self.totalAgentTimeWarnings = [0 for agent in agents] + self.agentTimeout = False + import io + self.agentOutput = [io.StringIO() for agent in agents] + + def getProgress(self): + if self.gameOver: + return 1.0 + else: + return self.rules.getProgress(self) + + def _agentCrash(self, agentIndex, quiet=False): + "Helper method for handling agent crashes" + if not quiet: + traceback.print_exc() + self.gameOver = True + self.agentCrashed = True + self.rules.agentCrash(self, agentIndex) + + OLD_STDOUT = None + OLD_STDERR = None + + def mute(self, agentIndex): + if not self.muteAgents: + return + global OLD_STDOUT, OLD_STDERR + import io + OLD_STDOUT = sys.stdout + OLD_STDERR = sys.stderr + sys.stdout = self.agentOutput[agentIndex] + sys.stderr = self.agentOutput[agentIndex] + + def unmute(self): + if not self.muteAgents: + return + global OLD_STDOUT, OLD_STDERR + # Revert stdout/stderr to originals + sys.stdout = OLD_STDOUT + sys.stderr = OLD_STDERR + + def run(self): + """ + Main control loop for game play. + """ + self.display.initialize(self.state.data) + self.numMoves = 0 + + # self.display.initialize(self.state.makeObservation(1).data) + # inform learning agents of the game start + for i in range(len(self.agents)): + agent = self.agents[i] + if not agent: + self.mute(i) + # this is a null agent, meaning it failed to load + # the other team wins + print("Agent %d failed to load" % i, file=sys.stderr) + self.unmute() + self._agentCrash(i, quiet=True) + return + if ("registerInitialState" in dir(agent)): + self.mute(i) + if self.catchExceptions: + try: + timed_func = TimeoutFunction( + agent.registerInitialState, int(self.rules.getMaxStartupTime(i))) + try: + start_time = time.time() + timed_func(self.state.deepCopy()) + time_taken = time.time() - start_time + self.totalAgentTimes[i] += time_taken + except TimeoutFunctionException: + print("Agent %d ran out of time on startup!" 
% + i, file=sys.stderr) + self.unmute() + self.agentTimeout = True + self._agentCrash(i, quiet=True) + return + except Exception as data: + self._agentCrash(i, quiet=False) + self.unmute() + return + else: + agent.registerInitialState(self.state.deepCopy()) + # TODO: could this exceed the total time + self.unmute() + + agentIndex = self.startingIndex + numAgents = len(self.agents) + + while not self.gameOver: + # Fetch the next agent + agent = self.agents[agentIndex] + move_time = 0 + skip_action = False + # Generate an observation of the state + if 'observationFunction' in dir(agent): + self.mute(agentIndex) + if self.catchExceptions: + try: + timed_func = TimeoutFunction(agent.observationFunction, int( + self.rules.getMoveTimeout(agentIndex))) + try: + start_time = time.time() + observation = timed_func(self.state.deepCopy()) + except TimeoutFunctionException: + skip_action = True + move_time += time.time() - start_time + self.unmute() + except Exception as data: + self._agentCrash(agentIndex, quiet=False) + self.unmute() + return + else: + observation = agent.observationFunction( + self.state.deepCopy()) + self.unmute() + else: + observation = self.state.deepCopy() + + # Solicit an action + action = None + self.mute(agentIndex) + if self.catchExceptions: + try: + timed_func = TimeoutFunction(agent.getAction, int( + self.rules.getMoveTimeout(agentIndex)) - int(move_time)) + try: + start_time = time.time() + if skip_action: + raise TimeoutFunctionException() + action = timed_func(observation) + except TimeoutFunctionException: + print("Agent %d timed out on a single move!" % + agentIndex, file=sys.stderr) + self.agentTimeout = True + self._agentCrash(agentIndex, quiet=True) + self.unmute() + return + + move_time += time.time() - start_time + + if move_time > self.rules.getMoveWarningTime(agentIndex): + self.totalAgentTimeWarnings[agentIndex] += 1 + print("Agent %d took too long to make a move! This is warning %d" % ( + agentIndex, self.totalAgentTimeWarnings[agentIndex]), file=sys.stderr) + if self.totalAgentTimeWarnings[agentIndex] > self.rules.getMaxTimeWarnings(agentIndex): + print("Agent %d exceeded the maximum number of warnings: %d" % ( + agentIndex, self.totalAgentTimeWarnings[agentIndex]), file=sys.stderr) + self.agentTimeout = True + self._agentCrash(agentIndex, quiet=True) + self.unmute() + return + + self.totalAgentTimes[agentIndex] += move_time + # print "Agent: %d, time: %f, total: %f" % (agentIndex, move_time, self.totalAgentTimes[agentIndex]) + if self.totalAgentTimes[agentIndex] > self.rules.getMaxTotalTime(agentIndex): + print("Agent %d ran out of time! (time: %1.2f)" % ( + agentIndex, self.totalAgentTimes[agentIndex]), file=sys.stderr) + self.agentTimeout = True + self._agentCrash(agentIndex, quiet=True) + self.unmute() + return + self.unmute() + except Exception as data: + self._agentCrash(agentIndex) + self.unmute() + return + else: + action = agent.getAction(observation) + self.unmute() + + # Execute the action + self.moveHistory.append((agentIndex, action)) + if self.catchExceptions: + try: + self.state = self.state.generateSuccessor( + agentIndex, action) + except Exception as data: + self.mute(agentIndex) + self._agentCrash(agentIndex) + self.unmute() + return + else: + self.state = self.state.generateSuccessor(agentIndex, action) + + # Change the display + self.display.update(self.state.data) + ###idx = agentIndex - agentIndex % 2 + 1 + ###self.display.update( self.state.makeObservation(idx).data ) + + # Allow for game specific conditions (winning, losing, etc.) 
+ self.rules.process(self.state, self) + # Track progress + if agentIndex == numAgents + 1: + self.numMoves += 1 + # Next agent + agentIndex = (agentIndex + 1) % numAgents + + if _BOINC_ENABLED: + boinc.set_fraction_done(self.getProgress()) + + # inform a learning agent of the game result + for agentIndex, agent in enumerate(self.agents): + if "final" in dir(agent): + try: + self.mute(agentIndex) + agent.final(self.state) + self.unmute() + except Exception as data: + if not self.catchExceptions: + raise + self._agentCrash(agentIndex) + self.unmute() + return + self.display.finish() diff --git a/multiagent/ghostAgents.py b/multiagent/ghostAgents.py new file mode 100644 index 0000000..0b84b50 --- /dev/null +++ b/multiagent/ghostAgents.py @@ -0,0 +1,93 @@ +# ghostAgents.py +# -------------- +# Licensing Information: You are free to use or extend these projects for +# educational purposes provided that (1) you do not distribute or publish +# solutions, (2) you retain this notice, and (3) you provide clear +# attribution to UC Berkeley, including a link to http://ai.berkeley.edu. +# +# Attribution Information: The Pacman AI projects were developed at UC Berkeley. +# The core projects and autograders were primarily created by John DeNero +# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). +# Student side autograding was added by Brad Miller, Nick Hay, and +# Pieter Abbeel (pabbeel@cs.berkeley.edu). + + +from game import Agent +from game import Actions +from game import Directions +import random +from util import manhattanDistance +import util + + +class GhostAgent(Agent): + def __init__(self, index): + self.index = index + + def getAction(self, state): + dist = self.getDistribution(state) + if len(dist) == 0: + return Directions.STOP + else: + return util.chooseFromDistribution(dist) + + def getDistribution(self, state): + "Returns a Counter encoding a distribution over actions from the provided state." + util.raiseNotDefined() + + +class RandomGhost(GhostAgent): + "A ghost that chooses a legal action uniformly at random." + + def getDistribution(self, state): + dist = util.Counter() + for a in state.getLegalActions(self.index): + dist[a] = 1.0 + dist.normalize() + return dist + + +class DirectionalGhost(GhostAgent): + "A ghost that prefers to rush Pacman, or flee when scared." 
+ + def __init__(self, index, prob_attack=0.8, prob_scaredFlee=0.8): + self.index = index + self.prob_attack = prob_attack + self.prob_scaredFlee = prob_scaredFlee + + def getDistribution(self, state): + # Read variables from state + ghostState = state.getGhostState(self.index) + legalActions = state.getLegalActions(self.index) + pos = state.getGhostPosition(self.index) + isScared = ghostState.scaredTimer > 0 + + speed = 1 + if isScared: + speed = 0.5 + + actionVectors = [Actions.directionToVector( + a, speed) for a in legalActions] + newPositions = [(pos[0]+a[0], pos[1]+a[1]) for a in actionVectors] + pacmanPosition = state.getPacmanPosition() + + # Select best actions given the state + distancesToPacman = [manhattanDistance( + pos, pacmanPosition) for pos in newPositions] + if isScared: + bestScore = max(distancesToPacman) + bestProb = self.prob_scaredFlee + else: + bestScore = min(distancesToPacman) + bestProb = self.prob_attack + bestActions = [action for action, distance in zip( + legalActions, distancesToPacman) if distance == bestScore] + + # Construct distribution + dist = util.Counter() + for a in bestActions: + dist[a] = bestProb / len(bestActions) + for a in legalActions: + dist[a] += (1-bestProb) / len(legalActions) + dist.normalize() + return dist diff --git a/multiagent/grading.py b/multiagent/grading.py new file mode 100644 index 0000000..30d628a --- /dev/null +++ b/multiagent/grading.py @@ -0,0 +1,328 @@ +# grading.py +# ---------- +# Licensing Information: You are free to use or extend these projects for +# educational purposes provided that (1) you do not distribute or publish +# solutions, (2) you retain this notice, and (3) you provide clear +# attribution to UC Berkeley, including a link to http://ai.berkeley.edu. +# +# Attribution Information: The Pacman AI projects were developed at UC Berkeley. +# The core projects and autograders were primarily created by John DeNero +# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). +# Student side autograding was added by Brad Miller, Nick Hay, and +# Pieter Abbeel (pabbeel@cs.berkeley.edu). 
+ + +"Common code for autograders" + +import html +import time +import sys +import json +import traceback +import pdb +from collections import defaultdict +import util + + +class Grades: + "A data structure for project grades, along with formatting code to display them" + + def __init__(self, projectName, questionsAndMaxesList, + gsOutput=False, edxOutput=False, muteOutput=False): + """ + Defines the grading scheme for a project + projectName: project name + questionsAndMaxesDict: a list of (question name, max points per question) + """ + self.questions = [el[0] for el in questionsAndMaxesList] + self.maxes = dict(questionsAndMaxesList) + self.points = Counter() + self.messages = dict([(q, []) for q in self.questions]) + self.project = projectName + self.start = time.localtime()[1:6] + self.sane = True # Sanity checks + self.currentQuestion = None # Which question we're grading + self.edxOutput = edxOutput + self.gsOutput = gsOutput # GradeScope output + self.mute = muteOutput + self.prereqs = defaultdict(set) + + # print 'Autograder transcript for %s' % self.project + print('Starting on %d-%d at %d:%02d:%02d' % self.start) + + def addPrereq(self, question, prereq): + self.prereqs[question].add(prereq) + + def grade(self, gradingModule, exceptionMap={}, bonusPic=False): + """ + Grades each question + gradingModule: the module with all the grading functions (pass in with sys.modules[__name__]) + """ + + completedQuestions = set([]) + for q in self.questions: + print('\nQuestion %s' % q) + print('=' * (9 + len(q))) + print() + self.currentQuestion = q + + incompleted = self.prereqs[q].difference(completedQuestions) + if len(incompleted) > 0: + prereq = incompleted.pop() + print("""*** NOTE: Make sure to complete Question %s before working on Question %s, +*** because Question %s builds upon your answer for Question %s. +""" % (prereq, q, q, prereq)) + continue + + if self.mute: + util.mutePrint() + try: + util.TimeoutFunction(getattr(gradingModule, q), 1800)( + self) # Call the question's function + # TimeoutFunction(getattr(gradingModule, q),1200)(self) # Call the question's function + except Exception as inst: + self.addExceptionMessage(q, inst, traceback) + self.addErrorHints(exceptionMap, inst, q[1]) + except: + self.fail('FAIL: Terminated with a string exception.') + finally: + if self.mute: + util.unmutePrint() + + if self.points[q] >= self.maxes[q]: + completedQuestions.add(q) + + print('\n### Question %s: %d/%d ###\n' % + (q, self.points[q], self.maxes[q])) + + print('\nFinished at %d:%02d:%02d' % time.localtime()[3:6]) + print("\nProvisional grades\n==================") + + for q in self.questions: + print('Question %s: %d/%d' % (q, self.points[q], self.maxes[q])) + print('------------------') + print('Total: %d/%d' % + (self.points.totalCount(), sum(self.maxes.values()))) + if bonusPic and self.points.totalCount() == 25: + print(""" + + ALL HAIL GRANDPAC. + LONG LIVE THE GHOSTBUSTING KING. 
+ + --- ---- --- + | \ / + \ / | + | + \--/ \--/ + | + | + + | + | + + + | + @@@@@@@@@@@@@@@@@@@@@@@@@@ + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + \ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + \ / @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + V \ @@@@@@@@@@@@@@@@@@@@@@@@@@@@ + \ / @@@@@@@@@@@@@@@@@@@@@@@@@@ + V @@@@@@@@@@@@@@@@@@@@@@@@ + @@@@@@@@@@@@@@@@@@@@@@ + /\ @@@@@@@@@@@@@@@@@@@@@@ + / \ @@@@@@@@@@@@@@@@@@@@@@@@@ + /\ / @@@@@@@@@@@@@@@@@@@@@@@@@@@ + / \ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + / @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @@@@@@@@@@@@@@@@@@@@@@@@@@ + @@@@@@@@@@@@@@@@@@ + +""") + print(""" +Your grades are NOT yet registered. To register your grades, make sure +to follow your instructor's guidelines to receive credit on your project. +""") + + if self.edxOutput: + self.produceOutput() + if self.gsOutput: + self.produceGradeScopeOutput() + + def addExceptionMessage(self, q, inst, traceback): + """ + Method to format the exception message, this is more complicated because + we need to html.escape the traceback but wrap the exception in a
<pre> tag
+        """
+        self.fail('FAIL: Exception raised: %s' % inst)
+        self.addMessage('')
+        for line in traceback.format_exc().split('\n'):
+            self.addMessage(line)
+
+    def addErrorHints(self, exceptionMap, errorInstance, questionNum):
+        typeOf = str(type(errorInstance))
+        questionName = 'q' + questionNum
+        errorHint = ''
+
+        # question specific error hints
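+        # (the inner keys are expected to be str(type(err)) strings such as
+        # "<class 'IndexError'>" in Python 3, matching typeOf above)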
+        if exceptionMap.get(questionName):
+            questionMap = exceptionMap.get(questionName)
+            if (questionMap.get(typeOf)):
+                errorHint = questionMap.get(typeOf)
+        # fall back to general error messages if a question specific
+        # one does not exist
+        if (exceptionMap.get(typeOf)):
+            errorHint = exceptionMap.get(typeOf)
+
+        # don't include the HTML if we have no error hint
+        if not errorHint:
+            return ''
+
+        for line in errorHint.split('\n'):
+            self.addMessage(line)
+
+    def produceGradeScopeOutput(self):
+        out_dct = {}
+
+        # total of entire submission
+        total_possible = sum(self.maxes.values())
+        total_score = sum(self.points.values())
+        out_dct['score'] = total_score
+        out_dct['max_score'] = total_possible
+        out_dct['output'] = "Total score (%d / %d)" % (
+            total_score, total_possible)
+
+        # individual tests
+        tests_out = []
+        for name in self.questions:
+            test_out = {}
+            # test name
+            test_out['name'] = name
+            # test score
+            test_out['score'] = self.points[name]
+            test_out['max_score'] = self.maxes[name]
+            # others
+            is_correct = self.points[name] >= self.maxes[name]
+            test_out['output'] = "  Question {num} ({points}/{max}) {correct}".format(
+                num=(name[1] if len(name) == 2 else name),
+                points=test_out['score'],
+                max=test_out['max_score'],
+                correct=('X' if not is_correct else ''),
+            )
+            test_out['tags'] = []
+            tests_out.append(test_out)
+        out_dct['tests'] = tests_out
+
+        # file output
+        with open('gradescope_response.json', 'w') as outfile:
+            json.dump(out_dct, outfile)
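+        # For reference, the file written above has roughly this shape
+        # (illustrative values, mirroring the out_dct built above):
+        #   {"score": 21, "max_score": 25, "output": "Total score (21 / 25)",
+        #    "tests": [{"name": "q1", "score": 4, "max_score": 4,
+        #               "output": "  Question 1 (4/4) ", "tags": []}, ...]}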
+        return
+
+    def produceOutput(self):
+        edxOutput = open('edx_response.html', 'w')
+        edxOutput.write("<div>")
+
+        # first sum
+        total_possible = sum(self.maxes.values())
+        total_score = sum(self.points.values())
+        checkOrX = '<span class="incorrect"/>'
+        if (total_score >= total_possible):
+            checkOrX = '<span class="correct"/>'
+        header = """
+        <h3>
+            Total score ({total_score} / {total_possible})
+        </h3>
+    """.format(total_score=total_score,
+               total_possible=total_possible,
+               checkOrX=checkOrX
+               )
+        edxOutput.write(header)
+
+        for q in self.questions:
+            if len(q) == 2:
+                name = q[1]
+            else:
+                name = q
+            checkOrX = '<span class="incorrect"/>'
+            if (self.points[q] >= self.maxes[q]):
+                checkOrX = '<span class="correct"/>'
+            #messages = '\n<br/>\n'.join(self.messages[q])
+            messages = "<pre>%s</pre>" % '\n'.join(self.messages[q])
+            output = """
+        <div class="test">
+          <section>
+          <div class="shortform">
+            Question {q} ({points}/{max}) {checkOrX}
+          </div>
+        <div class="longform">
+          {messages}
+        </div>
+        </section>
+      </div>
+      """.format(q=name,
+                 max=self.maxes[q],
+                 messages=messages,
+                 checkOrX=checkOrX,
+                 points=self.points[q]
+                 )
+            # print "*** output for Question %s " % q[1]
+            # print output
+            edxOutput.write(output)
+        edxOutput.write("</div>
") + edxOutput.close() + edxOutput = open('edx_grade', 'w') + edxOutput.write(str(self.points.totalCount())) + edxOutput.close() + + def fail(self, message, raw=False): + "Sets sanity check bit to false and outputs a message" + self.sane = False + self.assignZeroCredit() + self.addMessage(message, raw) + + def assignZeroCredit(self): + self.points[self.currentQuestion] = 0 + + def addPoints(self, amt): + self.points[self.currentQuestion] += amt + + def deductPoints(self, amt): + self.points[self.currentQuestion] -= amt + + def assignFullCredit(self, message="", raw=False): + self.points[self.currentQuestion] = self.maxes[self.currentQuestion] + if message != "": + self.addMessage(message, raw) + + def addMessage(self, message, raw=False): + if not raw: + # We assume raw messages, formatted for HTML, are printed separately + if self.mute: + util.unmutePrint() + print('*** ' + message) + if self.mute: + util.mutePrint() + message = html.escape(message) + self.messages[self.currentQuestion].append(message) + + def addMessageToEmail(self, message): + print("WARNING**** addMessageToEmail is deprecated %s" % message) + for line in message.split('\n'): + pass + # print '%%% ' + line + ' %%%' + # self.messages[self.currentQuestion].append(line) + + +class Counter(dict): + """ + Dict with default 0 + """ + + def __getitem__(self, idx): + try: + return dict.__getitem__(self, idx) + except KeyError: + return 0 + + def totalCount(self): + """ + Returns the sum of counts for all keys. + """ + return sum(self.values()) diff --git a/multiagent/graphicsDisplay.py b/multiagent/graphicsDisplay.py new file mode 100644 index 0000000..b2a82e5 --- /dev/null +++ b/multiagent/graphicsDisplay.py @@ -0,0 +1,738 @@ +# graphicsDisplay.py +# ------------------ +# Licensing Information: You are free to use or extend these projects for +# educational purposes provided that (1) you do not distribute or publish +# solutions, (2) you retain this notice, and (3) you provide clear +# attribution to UC Berkeley, including a link to http://ai.berkeley.edu. +# +# Attribution Information: The Pacman AI projects were developed at UC Berkeley. +# The core projects and autograders were primarily created by John DeNero +# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). +# Student side autograding was added by Brad Miller, Nick Hay, and +# Pieter Abbeel (pabbeel@cs.berkeley.edu). + + +from graphicsUtils import * +import math +import time +from game import Directions + +########################### +# GRAPHICS DISPLAY CODE # +########################### + +# Most code by Dan Klein and John Denero written or rewritten for cs188, UC Berkeley. +# Some code from a Pacman implementation by LiveWires, and used / modified with permission. 
+ +DEFAULT_GRID_SIZE = 30.0 +INFO_PANE_HEIGHT = 35 +BACKGROUND_COLOR = formatColor(0, 0, 0) +WALL_COLOR = formatColor(0.0/255.0, 51.0/255.0, 255.0/255.0) +INFO_PANE_COLOR = formatColor(.4, .4, 0) +SCORE_COLOR = formatColor(.9, .9, .9) +PACMAN_OUTLINE_WIDTH = 2 +PACMAN_CAPTURE_OUTLINE_WIDTH = 4 + +GHOST_COLORS = [] +GHOST_COLORS.append(formatColor(.9, 0, 0)) # Red +GHOST_COLORS.append(formatColor(0, .3, .9)) # Blue +GHOST_COLORS.append(formatColor(.98, .41, .07)) # Orange +GHOST_COLORS.append(formatColor(.1, .75, .7)) # Green +GHOST_COLORS.append(formatColor(1.0, 0.6, 0.0)) # Yellow +GHOST_COLORS.append(formatColor(.4, 0.13, 0.91)) # Purple + +TEAM_COLORS = GHOST_COLORS[:2] + +GHOST_SHAPE = [ + (0, 0.3), + (0.25, 0.75), + (0.5, 0.3), + (0.75, 0.75), + (0.75, -0.5), + (0.5, -0.75), + (-0.5, -0.75), + (-0.75, -0.5), + (-0.75, 0.75), + (-0.5, 0.3), + (-0.25, 0.75) +] +GHOST_SIZE = 0.65 +SCARED_COLOR = formatColor(1, 1, 1) + +GHOST_VEC_COLORS = list(map(colorToVector, GHOST_COLORS)) + +PACMAN_COLOR = formatColor(255.0/255.0, 255.0/255.0, 61.0/255) +PACMAN_SCALE = 0.5 +#pacman_speed = 0.25 + +# Food +FOOD_COLOR = formatColor(1, 1, 1) +FOOD_SIZE = 0.1 + +# Laser +LASER_COLOR = formatColor(1, 0, 0) +LASER_SIZE = 0.02 + +# Capsule graphics +CAPSULE_COLOR = formatColor(1, 1, 1) +CAPSULE_SIZE = 0.25 + +# Drawing walls +WALL_RADIUS = 0.15 + + +class InfoPane: + def __init__(self, layout, gridSize): + self.gridSize = gridSize + self.width = (layout.width) * gridSize + self.base = (layout.height + 1) * gridSize + self.height = INFO_PANE_HEIGHT + self.fontSize = 24 + self.textColor = PACMAN_COLOR + self.drawPane() + + def toScreen(self, pos, y=None): + """ + Translates a point relative from the bottom left of the info pane. + """ + if y == None: + x, y = pos + else: + x = pos + + x = self.gridSize + x # Margin + y = self.base + y + return x, y + + def drawPane(self): + self.scoreText = text(self.toScreen( + 0, 0), self.textColor, "SCORE: 0", "Times", self.fontSize, "bold") + + def initializeGhostDistances(self, distances): + self.ghostDistanceText = [] + + size = 20 + if self.width < 240: + size = 12 + if self.width < 160: + size = 10 + + for i, d in enumerate(distances): + t = text(self.toScreen(self.width/2 + self.width/8 * i, 0), + GHOST_COLORS[i+1], d, "Times", size, "bold") + self.ghostDistanceText.append(t) + + def updateScore(self, score): + changeText(self.scoreText, "SCORE: % 4d" % score) + + def setTeam(self, isBlue): + text = "RED TEAM" + if isBlue: + text = "BLUE TEAM" + self.teamText = text(self.toScreen( + 300, 0), self.textColor, text, "Times", self.fontSize, "bold") + + def updateGhostDistances(self, distances): + if len(distances) == 0: + return + if 'ghostDistanceText' not in dir(self): + self.initializeGhostDistances(distances) + else: + for i, d in enumerate(distances): + changeText(self.ghostDistanceText[i], d) + + def drawGhost(self): + pass + + def drawPacman(self): + pass + + def drawWarning(self): + pass + + def clearIcon(self): + pass + + def updateMessage(self, message): + pass + + def clearMessage(self): + pass + + +class PacmanGraphics: + def __init__(self, zoom=1.0, frameTime=0.0, capture=False): + self.have_window = 0 + self.currentGhostImages = {} + self.pacmanImage = None + self.zoom = zoom + self.gridSize = DEFAULT_GRID_SIZE * zoom + self.capture = capture + self.frameTime = frameTime + + def checkNullDisplay(self): + return False + + def initialize(self, state, isBlue=False): + self.isBlue = isBlue + self.startGraphics(state) + + # self.drawDistributions(state) + 
self.distributionImages = None # Initialized lazily + self.drawStaticObjects(state) + self.drawAgentObjects(state) + + # Information + self.previousState = state + + def startGraphics(self, state): + self.layout = state.layout + layout = self.layout + self.width = layout.width + self.height = layout.height + self.make_window(self.width, self.height) + self.infoPane = InfoPane(layout, self.gridSize) + self.currentState = layout + + def drawDistributions(self, state): + walls = state.layout.walls + dist = [] + for x in range(walls.width): + distx = [] + dist.append(distx) + for y in range(walls.height): + (screen_x, screen_y) = self.to_screen((x, y)) + block = square((screen_x, screen_y), + 0.5 * self.gridSize, + color=BACKGROUND_COLOR, + filled=1, behind=2) + distx.append(block) + self.distributionImages = dist + + def drawStaticObjects(self, state): + layout = self.layout + self.drawWalls(layout.walls) + self.food = self.drawFood(layout.food) + self.capsules = self.drawCapsules(layout.capsules) + refresh() + + def drawAgentObjects(self, state): + self.agentImages = [] # (agentState, image) + for index, agent in enumerate(state.agentStates): + if agent.isPacman: + image = self.drawPacman(agent, index) + self.agentImages.append((agent, image)) + else: + image = self.drawGhost(agent, index) + self.agentImages.append((agent, image)) + refresh() + + def swapImages(self, agentIndex, newState): + """ + Changes an image from a ghost to a pacman or vis versa (for capture) + """ + prevState, prevImage = self.agentImages[agentIndex] + for item in prevImage: + remove_from_screen(item) + if newState.isPacman: + image = self.drawPacman(newState, agentIndex) + self.agentImages[agentIndex] = (newState, image) + else: + image = self.drawGhost(newState, agentIndex) + self.agentImages[agentIndex] = (newState, image) + refresh() + + def update(self, newState): + agentIndex = newState._agentMoved + agentState = newState.agentStates[agentIndex] + + if self.agentImages[agentIndex][0].isPacman != agentState.isPacman: + self.swapImages(agentIndex, agentState) + prevState, prevImage = self.agentImages[agentIndex] + if agentState.isPacman: + self.animatePacman(agentState, prevState, prevImage) + else: + self.moveGhost(agentState, agentIndex, prevState, prevImage) + self.agentImages[agentIndex] = (agentState, prevImage) + + if newState._foodEaten != None: + self.removeFood(newState._foodEaten, self.food) + if newState._capsuleEaten != None: + self.removeCapsule(newState._capsuleEaten, self.capsules) + self.infoPane.updateScore(newState.score) + if 'ghostDistances' in dir(newState): + self.infoPane.updateGhostDistances(newState.ghostDistances) + + def make_window(self, width, height): + grid_width = (width-1) * self.gridSize + grid_height = (height-1) * self.gridSize + screen_width = 2*self.gridSize + grid_width + screen_height = 2*self.gridSize + grid_height + INFO_PANE_HEIGHT + + begin_graphics(screen_width, + screen_height, + BACKGROUND_COLOR, + "CS188 Pacman") + + def drawPacman(self, pacman, index): + position = self.getPosition(pacman) + screen_point = self.to_screen(position) + endpoints = self.getEndpoints(self.getDirection(pacman)) + + width = PACMAN_OUTLINE_WIDTH + outlineColor = PACMAN_COLOR + fillColor = PACMAN_COLOR + + if self.capture: + outlineColor = TEAM_COLORS[index % 2] + fillColor = GHOST_COLORS[index] + width = PACMAN_CAPTURE_OUTLINE_WIDTH + + return [circle(screen_point, PACMAN_SCALE * self.gridSize, + fillColor=fillColor, outlineColor=outlineColor, + endpoints=endpoints, + width=width)] + + def 
getEndpoints(self, direction, position=(0, 0)): + x, y = position + pos = x - int(x) + y - int(y) + width = 30 + 80 * math.sin(math.pi * pos) + + delta = width / 2 + if (direction == 'West'): + endpoints = (180+delta, 180-delta) + elif (direction == 'North'): + endpoints = (90+delta, 90-delta) + elif (direction == 'South'): + endpoints = (270+delta, 270-delta) + else: + endpoints = (0+delta, 0-delta) + return endpoints + + def movePacman(self, position, direction, image): + screenPosition = self.to_screen(position) + endpoints = self.getEndpoints(direction, position) + r = PACMAN_SCALE * self.gridSize + moveCircle(image[0], screenPosition, r, endpoints) + refresh() + + def animatePacman(self, pacman, prevPacman, image): + if self.frameTime < 0: + print('Press any key to step forward, "q" to play') + keys = wait_for_keys() + if 'q' in keys: + self.frameTime = 0.1 + if self.frameTime > 0.01 or self.frameTime < 0: + start = time.time() + fx, fy = self.getPosition(prevPacman) + px, py = self.getPosition(pacman) + frames = 4.0 + for i in range(1, int(frames) + 1): + pos = px*i/frames + fx * \ + (frames-i)/frames, py*i/frames + fy*(frames-i)/frames + self.movePacman(pos, self.getDirection(pacman), image) + refresh() + sleep(abs(self.frameTime) / frames) + else: + self.movePacman(self.getPosition(pacman), + self.getDirection(pacman), image) + refresh() + + def getGhostColor(self, ghost, ghostIndex): + if ghost.scaredTimer > 0: + return SCARED_COLOR + else: + return GHOST_COLORS[ghostIndex] + + def drawGhost(self, ghost, agentIndex): + pos = self.getPosition(ghost) + dir = self.getDirection(ghost) + (screen_x, screen_y) = (self.to_screen(pos)) + coords = [] + for (x, y) in GHOST_SHAPE: + coords.append((x*self.gridSize*GHOST_SIZE + screen_x, + y*self.gridSize*GHOST_SIZE + screen_y)) + + colour = self.getGhostColor(ghost, agentIndex) + body = polygon(coords, colour, filled=1) + WHITE = formatColor(1.0, 1.0, 1.0) + BLACK = formatColor(0.0, 0.0, 0.0) + + dx = 0 + dy = 0 + if dir == 'North': + dy = -0.2 + if dir == 'South': + dy = 0.2 + if dir == 'East': + dx = 0.2 + if dir == 'West': + dx = -0.2 + leftEye = circle((screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx/1.5), screen_y - + self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2, WHITE, WHITE) + rightEye = circle((screen_x+self.gridSize*GHOST_SIZE*(0.3+dx/1.5), screen_y - + self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2, WHITE, WHITE) + leftPupil = circle((screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx), screen_y - + self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08, BLACK, BLACK) + rightPupil = circle((screen_x+self.gridSize*GHOST_SIZE*(0.3+dx), screen_y - + self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08, BLACK, BLACK) + ghostImageParts = [] + ghostImageParts.append(body) + ghostImageParts.append(leftEye) + ghostImageParts.append(rightEye) + ghostImageParts.append(leftPupil) + ghostImageParts.append(rightPupil) + + return ghostImageParts + + def moveEyes(self, pos, dir, eyes): + (screen_x, screen_y) = (self.to_screen(pos)) + dx = 0 + dy = 0 + if dir == 'North': + dy = -0.2 + if dir == 'South': + dy = 0.2 + if dir == 'East': + dx = 0.2 + if dir == 'West': + dx = -0.2 + moveCircle(eyes[0], (screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx/1.5), screen_y - + self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2) + moveCircle(eyes[1], (screen_x+self.gridSize*GHOST_SIZE*(0.3+dx/1.5), screen_y - + self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2) + 
moveCircle(eyes[2], (screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx), screen_y - + self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08) + moveCircle(eyes[3], (screen_x+self.gridSize*GHOST_SIZE*(0.3+dx), screen_y - + self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08) + + def moveGhost(self, ghost, ghostIndex, prevGhost, ghostImageParts): + old_x, old_y = self.to_screen(self.getPosition(prevGhost)) + new_x, new_y = self.to_screen(self.getPosition(ghost)) + delta = new_x - old_x, new_y - old_y + + for ghostImagePart in ghostImageParts: + move_by(ghostImagePart, delta) + refresh() + + if ghost.scaredTimer > 0: + color = SCARED_COLOR + else: + color = GHOST_COLORS[ghostIndex] + edit(ghostImageParts[0], ('fill', color), ('outline', color)) + self.moveEyes(self.getPosition(ghost), + self.getDirection(ghost), ghostImageParts[-4:]) + refresh() + + def getPosition(self, agentState): + if agentState.configuration == None: + return (-1000, -1000) + return agentState.getPosition() + + def getDirection(self, agentState): + if agentState.configuration == None: + return Directions.STOP + return agentState.configuration.getDirection() + + def finish(self): + end_graphics() + + def to_screen(self, point): + (x, y) = point + #y = self.height - y + x = (x + 1)*self.gridSize + y = (self.height - y)*self.gridSize + return (x, y) + + # Fixes some TK issue with off-center circles + def to_screen2(self, point): + (x, y) = point + #y = self.height - y + x = (x + 1)*self.gridSize + y = (self.height - y)*self.gridSize + return (x, y) + + def drawWalls(self, wallMatrix): + wallColor = WALL_COLOR + for xNum, x in enumerate(wallMatrix): + if self.capture and (xNum * 2) < wallMatrix.width: + wallColor = TEAM_COLORS[0] + if self.capture and (xNum * 2) >= wallMatrix.width: + wallColor = TEAM_COLORS[1] + + for yNum, cell in enumerate(x): + if cell: # There's a wall here + pos = (xNum, yNum) + screen = self.to_screen(pos) + screen2 = self.to_screen2(pos) + + # draw each quadrant of the square based on adjacent walls + wIsWall = self.isWall(xNum-1, yNum, wallMatrix) + eIsWall = self.isWall(xNum+1, yNum, wallMatrix) + nIsWall = self.isWall(xNum, yNum+1, wallMatrix) + sIsWall = self.isWall(xNum, yNum-1, wallMatrix) + nwIsWall = self.isWall(xNum-1, yNum+1, wallMatrix) + swIsWall = self.isWall(xNum-1, yNum-1, wallMatrix) + neIsWall = self.isWall(xNum+1, yNum+1, wallMatrix) + seIsWall = self.isWall(xNum+1, yNum-1, wallMatrix) + + # NE quadrant + if (not nIsWall) and (not eIsWall): + # inner circle + circle(screen2, WALL_RADIUS * self.gridSize, + wallColor, wallColor, (0, 91), 'arc') + if (nIsWall) and (not eIsWall): + # vertical line + line(add(screen, (self.gridSize*WALL_RADIUS, 0)), add(screen, + (self.gridSize*WALL_RADIUS, self.gridSize*(-0.5)-1)), wallColor) + if (not nIsWall) and (eIsWall): + # horizontal line + line(add(screen, (0, self.gridSize*(-1)*WALL_RADIUS)), add(screen, + (self.gridSize*0.5+1, self.gridSize*(-1)*WALL_RADIUS)), wallColor) + if (nIsWall) and (eIsWall) and (not neIsWall): + # outer circle + circle(add(screen2, (self.gridSize*2*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS)), + WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (180, 271), 'arc') + line(add(screen, (self.gridSize*2*WALL_RADIUS-1, self.gridSize*(-1)*WALL_RADIUS)), + add(screen, (self.gridSize*0.5+1, self.gridSize*(-1)*WALL_RADIUS)), wallColor) + line(add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS+1)), + add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-0.5))), wallColor) + + # NW 
quadrant + if (not nIsWall) and (not wIsWall): + # inner circle + circle(screen2, WALL_RADIUS * self.gridSize, + wallColor, wallColor, (90, 181), 'arc') + if (nIsWall) and (not wIsWall): + # vertical line + line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, 0)), add(screen, + (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-0.5)-1)), wallColor) + if (not nIsWall) and (wIsWall): + # horizontal line + line(add(screen, (0, self.gridSize*(-1)*WALL_RADIUS)), add(screen, + (self.gridSize*(-0.5)-1, self.gridSize*(-1)*WALL_RADIUS)), wallColor) + if (nIsWall) and (wIsWall) and (not nwIsWall): + # outer circle + circle(add(screen2, (self.gridSize*(-2)*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS)), + WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (270, 361), 'arc') + line(add(screen, (self.gridSize*(-2)*WALL_RADIUS+1, self.gridSize*(-1)*WALL_RADIUS)), + add(screen, (self.gridSize*(-0.5), self.gridSize*(-1)*WALL_RADIUS)), wallColor) + line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS+1)), + add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-0.5))), wallColor) + + # SE quadrant + if (not sIsWall) and (not eIsWall): + # inner circle + circle(screen2, WALL_RADIUS * self.gridSize, + wallColor, wallColor, (270, 361), 'arc') + if (sIsWall) and (not eIsWall): + # vertical line + line(add(screen, (self.gridSize*WALL_RADIUS, 0)), add(screen, + (self.gridSize*WALL_RADIUS, self.gridSize*(0.5)+1)), wallColor) + if (not sIsWall) and (eIsWall): + # horizontal line + line(add(screen, (0, self.gridSize*(1)*WALL_RADIUS)), add(screen, + (self.gridSize*0.5+1, self.gridSize*(1)*WALL_RADIUS)), wallColor) + if (sIsWall) and (eIsWall) and (not seIsWall): + # outer circle + circle(add(screen2, (self.gridSize*2*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS)), + WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (90, 181), 'arc') + line(add(screen, (self.gridSize*2*WALL_RADIUS-1, self.gridSize*(1)*WALL_RADIUS)), + add(screen, (self.gridSize*0.5, self.gridSize*(1)*WALL_RADIUS)), wallColor) + line(add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS-1)), + add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(0.5))), wallColor) + + # SW quadrant + if (not sIsWall) and (not wIsWall): + # inner circle + circle(screen2, WALL_RADIUS * self.gridSize, + wallColor, wallColor, (180, 271), 'arc') + if (sIsWall) and (not wIsWall): + # vertical line + line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, 0)), add(screen, + (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(0.5)+1)), wallColor) + if (not sIsWall) and (wIsWall): + # horizontal line + line(add(screen, (0, self.gridSize*(1)*WALL_RADIUS)), add(screen, + (self.gridSize*(-0.5)-1, self.gridSize*(1)*WALL_RADIUS)), wallColor) + if (sIsWall) and (wIsWall) and (not swIsWall): + # outer circle + circle(add(screen2, (self.gridSize*(-2)*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS)), + WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (0, 91), 'arc') + line(add(screen, (self.gridSize*(-2)*WALL_RADIUS+1, self.gridSize*(1)*WALL_RADIUS)), + add(screen, (self.gridSize*(-0.5), self.gridSize*(1)*WALL_RADIUS)), wallColor) + line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS-1)), + add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(0.5))), wallColor) + + def isWall(self, x, y, walls): + if x < 0 or y < 0: + return False + if x >= walls.width or y >= walls.height: + return False + return walls[x][y] + + def drawFood(self, foodMatrix): + foodImages = [] + color = FOOD_COLOR + for xNum, x in 
enumerate(foodMatrix): + if self.capture and (xNum * 2) <= foodMatrix.width: + color = TEAM_COLORS[0] + if self.capture and (xNum * 2) > foodMatrix.width: + color = TEAM_COLORS[1] + imageRow = [] + foodImages.append(imageRow) + for yNum, cell in enumerate(x): + if cell: # There's food here + screen = self.to_screen((xNum, yNum)) + dot = circle(screen, + FOOD_SIZE * self.gridSize, + outlineColor=color, fillColor=color, + width=1) + imageRow.append(dot) + else: + imageRow.append(None) + return foodImages + + def drawCapsules(self, capsules): + capsuleImages = {} + for capsule in capsules: + (screen_x, screen_y) = self.to_screen(capsule) + dot = circle((screen_x, screen_y), + CAPSULE_SIZE * self.gridSize, + outlineColor=CAPSULE_COLOR, + fillColor=CAPSULE_COLOR, + width=1) + capsuleImages[capsule] = dot + return capsuleImages + + def removeFood(self, cell, foodImages): + x, y = cell + remove_from_screen(foodImages[x][y]) + + def removeCapsule(self, cell, capsuleImages): + x, y = cell + remove_from_screen(capsuleImages[(x, y)]) + + def drawExpandedCells(self, cells): + """ + Draws an overlay of expanded grid positions for search agents + """ + n = float(len(cells)) + baseColor = [1.0, 0.0, 0.0] + self.clearExpandedCells() + self.expandedCells = [] + for k, cell in enumerate(cells): + screenPos = self.to_screen(cell) + cellColor = formatColor( + *[(n-k) * c * .5 / n + .25 for c in baseColor]) + block = square(screenPos, + 0.5 * self.gridSize, + color=cellColor, + filled=1, behind=2) + self.expandedCells.append(block) + if self.frameTime < 0: + refresh() + + def clearExpandedCells(self): + if 'expandedCells' in dir(self) and len(self.expandedCells) > 0: + for cell in self.expandedCells: + remove_from_screen(cell) + + def updateDistributions(self, distributions): + "Draws an agent's belief distributions" + # copy all distributions so we don't change their state + distributions = [x.copy() for x in distributions] + if self.distributionImages == None: + self.drawDistributions(self.previousState) + for x in range(len(self.distributionImages)): + for y in range(len(self.distributionImages[0])): + image = self.distributionImages[x][y] + weights = [dist[(x, y)] for dist in distributions] + + if sum(weights) != 0: + pass + # Fog of war + color = [0.0, 0.0, 0.0] + colors = GHOST_VEC_COLORS[1:] # With Pacman + if self.capture: + colors = GHOST_VEC_COLORS + for weight, gcolor in zip(weights, colors): + color = [min(1.0, c + 0.95 * g * weight ** .3) + for c, g in zip(color, gcolor)] + changeColor(image, formatColor(*color)) + refresh() + + +class FirstPersonPacmanGraphics(PacmanGraphics): + def __init__(self, zoom=1.0, showGhosts=True, capture=False, frameTime=0): + PacmanGraphics.__init__(self, zoom, frameTime=frameTime) + self.showGhosts = showGhosts + self.capture = capture + + def initialize(self, state, isBlue=False): + + self.isBlue = isBlue + PacmanGraphics.startGraphics(self, state) + # Initialize distribution images + walls = state.layout.walls + dist = [] + self.layout = state.layout + + # Draw the rest + self.distributionImages = None # initialize lazily + self.drawStaticObjects(state) + self.drawAgentObjects(state) + + # Information + self.previousState = state + + def lookAhead(self, config, state): + if config.getDirection() == 'Stop': + return + else: + pass + # Draw relevant ghosts + allGhosts = state.getGhostStates() + visibleGhosts = state.getVisibleGhosts() + for i, ghost in enumerate(allGhosts): + if ghost in visibleGhosts: + self.drawGhost(ghost, i) + else: + self.currentGhostImages[i] = 
None + + def getGhostColor(self, ghost, ghostIndex): + return GHOST_COLORS[ghostIndex] + + def getPosition(self, ghostState): + if not self.showGhosts and not ghostState.isPacman and ghostState.getPosition()[1] > 1: + return (-1000, -1000) + else: + return PacmanGraphics.getPosition(self, ghostState) + + +def add(x, y): + return (x[0] + y[0], x[1] + y[1]) + + +# Saving graphical output +# ----------------------- +# Note: to make an animated gif from this postscript output, try the command: +# convert -delay 7 -loop 1 -compress lzw -layers optimize frame* out.gif +# convert is part of imagemagick (freeware) + +SAVE_POSTSCRIPT = False +POSTSCRIPT_OUTPUT_DIR = 'frames' +FRAME_NUMBER = 0 +import os + + +def saveFrame(): + "Saves the current graphical output as a postscript file" + global SAVE_POSTSCRIPT, FRAME_NUMBER, POSTSCRIPT_OUTPUT_DIR + if not SAVE_POSTSCRIPT: + return + if not os.path.exists(POSTSCRIPT_OUTPUT_DIR): + os.mkdir(POSTSCRIPT_OUTPUT_DIR) + name = os.path.join(POSTSCRIPT_OUTPUT_DIR, 'frame_%08d.ps' % FRAME_NUMBER) + FRAME_NUMBER += 1 + writePostscript(name) # writes the current canvas diff --git a/multiagent/graphicsUtils.py b/multiagent/graphicsUtils.py new file mode 100644 index 0000000..a406fbc --- /dev/null +++ b/multiagent/graphicsUtils.py @@ -0,0 +1,451 @@ +# graphicsUtils.py +# ---------------- +# Licensing Information: You are free to use or extend these projects for +# educational purposes provided that (1) you do not distribute or publish +# solutions, (2) you retain this notice, and (3) you provide clear +# attribution to UC Berkeley, including a link to http://ai.berkeley.edu. +# +# Attribution Information: The Pacman AI projects were developed at UC Berkeley. +# The core projects and autograders were primarily created by John DeNero +# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). +# Student side autograding was added by Brad Miller, Nick Hay, and +# Pieter Abbeel (pabbeel@cs.berkeley.edu). + + +import sys +import math +import random +import string +import time +import types +import tkinter +import os.path + +_Windows = sys.platform == 'win32' # True if on Win95/98/NT + +_root_window = None # The root window for graphics output +_canvas = None # The canvas which holds graphics +_canvas_xs = None # Size of canvas object +_canvas_ys = None +_canvas_x = None # Current position on canvas +_canvas_y = None +_canvas_col = None # Current colour (set to black below) +_canvas_tsize = 12 +_canvas_tserifs = 0 + + +def formatColor(r, g, b): + return '#%02x%02x%02x' % (int(r * 255), int(g * 255), int(b * 255)) + + +def colorToVector(color): + return [int(x, 16) / 256.0 for x in [color[1:3], color[3:5], color[5:7]]] + + +if _Windows: + _canvas_tfonts = ['times new roman', 'lucida console'] +else: + _canvas_tfonts = ['times', 'lucidasans-24'] + pass # XXX need defaults here + + +def sleep(secs): + global _root_window + if _root_window == None: + time.sleep(secs) + else: + _root_window.update_idletasks() + _root_window.after(int(1000 * secs), _root_window.quit) + _root_window.mainloop() + + +def begin_graphics(width=640, height=480, color=formatColor(0, 0, 0), title=None): + + global _root_window, _canvas, _canvas_x, _canvas_y, _canvas_xs, _canvas_ys, _bg_color + + # Check for duplicate call + if _root_window is not None: + # Lose the window. 
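+        # This module tracks a single root window (_root_window), so a
+        # repeated begin_graphics() call (e.g. between autograder games)
+        # first destroys the window left over from the previous game.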
+        _root_window.destroy()
+
+    # Save the canvas size parameters
+    _canvas_xs, _canvas_ys = width - 1, height - 1
+    _canvas_x, _canvas_y = 0, _canvas_ys
+    _bg_color = color
+
+    # Create the root window
+    _root_window = tkinter.Tk()
+    _root_window.protocol('WM_DELETE_WINDOW', _destroy_window)
+    _root_window.title(title or 'Graphics Window')
+    _root_window.resizable(0, 0)
+
+    # Create the canvas object
+    try:
+        _canvas = tkinter.Canvas(_root_window, width=width, height=height)
+        _canvas.pack()
+        draw_background()
+        _canvas.update()
+    except:
+        _root_window = None
+        raise
+
+    # Bind to key-down and key-up events
+    _root_window.bind("<KeyPress>", _keypress)
+    _root_window.bind("<KeyRelease>", _keyrelease)
+    _root_window.bind("<FocusIn>", _clear_keys)
+    _root_window.bind("<FocusOut>", _clear_keys)
+    _root_window.bind("<Button-1>", _leftclick)
+    _root_window.bind("<Button-2>", _rightclick)
+    _root_window.bind("<Button-3>", _rightclick)
+    _root_window.bind("<Control-Button-1>", _ctrl_leftclick)
+    _clear_keys()
+
+
+_leftclick_loc = None
+_rightclick_loc = None
+_ctrl_leftclick_loc = None
+
+
+def _leftclick(event):
+    global _leftclick_loc
+    _leftclick_loc = (event.x, event.y)
+
+
+def _rightclick(event):
+    global _rightclick_loc
+    _rightclick_loc = (event.x, event.y)
+
+
+def _ctrl_leftclick(event):
+    global _ctrl_leftclick_loc
+    _ctrl_leftclick_loc = (event.x, event.y)
+
+
+def wait_for_click():
+    while True:
+        global _leftclick_loc
+        global _rightclick_loc
+        global _ctrl_leftclick_loc
+        if _leftclick_loc != None:
+            val = _leftclick_loc
+            _leftclick_loc = None
+            return val, 'left'
+        if _rightclick_loc != None:
+            val = _rightclick_loc
+            _rightclick_loc = None
+            return val, 'right'
+        if _ctrl_leftclick_loc != None:
+            val = _ctrl_leftclick_loc
+            _ctrl_leftclick_loc = None
+            return val, 'ctrl_left'
+        sleep(0.05)
+
+
+def draw_background():
+    corners = [(0, 0), (0, _canvas_ys),
+               (_canvas_xs, _canvas_ys), (_canvas_xs, 0)]
+    polygon(corners, _bg_color, fillColor=_bg_color,
+            filled=True, smoothed=False)
+
+
+def _destroy_window(event=None):
+    sys.exit(0)
+#    global _root_window
+#    _root_window.destroy()
+#    _root_window = None
+    # print "DESTROY"
+
+
+def end_graphics():
+    global _root_window, _canvas, _mouse_enabled
+    try:
+        try:
+            sleep(1)
+            if _root_window != None:
+                _root_window.destroy()
+        except SystemExit as e:
+            print('Ending graphics raised an exception:', e)
+    finally:
+        _root_window = None
+        _canvas = None
+        _mouse_enabled = 0
+        _clear_keys()
+
+
+def clear_screen(background=None):
+    global _canvas_x, _canvas_y
+    _canvas.delete('all')
+    draw_background()
+    _canvas_x, _canvas_y = 0, _canvas_ys
+
+
+def polygon(coords, outlineColor, fillColor=None, filled=1, smoothed=1, behind=0, width=1):
+    c = []
+    for coord in coords:
+        c.append(coord[0])
+        c.append(coord[1])
+    if fillColor == None:
+        fillColor = outlineColor
+    if filled == 0:
+        fillColor = ""
+    poly = _canvas.create_polygon(
+        c, outline=outlineColor, fill=fillColor, smooth=smoothed, width=width)
+    if behind > 0:
+        _canvas.tag_lower(poly, behind)  # Higher should be more visible
+    return poly
+
+
+def square(pos, r, color, filled=1, behind=0):
+    x, y = pos
+    coords = [(x - r, y - r), (x + r, y - r), (x + r, y + r), (x - r, y + r)]
+    return polygon(coords, color, color, filled, 0, behind=behind)
+
+
+def circle(pos, r, outlineColor, fillColor, endpoints=None, style='pieslice', width=2):
+    x, y = pos
+    x0, x1 = x - r - 1, x + r
+    y0, y1 = y - r - 1, y + r
+    if endpoints == None:
+        e = [0, 359]
+    else:
+        e = list(endpoints)
+    while e[0] > e[1]:
+        e[1] = e[1] + 360
+
+    return _canvas.create_arc(x0, y0, x1, y1, outline=outlineColor, 
fill=fillColor, + extent=e[1] - e[0], start=e[0], style=style, width=width) + + +def image(pos, file="../../blueghost.gif"): + x, y = pos + # img = PhotoImage(file=file) + return _canvas.create_image(x, y, image=tkinter.PhotoImage(file=file), anchor=tkinter.NW) + + +def refresh(): + _canvas.update_idletasks() + + +def moveCircle(id, pos, r, endpoints=None): + global _canvas_x, _canvas_y + + x, y = pos +# x0, x1 = x - r, x + r + 1 +# y0, y1 = y - r, y + r + 1 + x0, x1 = x - r - 1, x + r + y0, y1 = y - r - 1, y + r + if endpoints == None: + e = [0, 359] + else: + e = list(endpoints) + while e[0] > e[1]: + e[1] = e[1] + 360 + + if os.path.isfile('flag'): + edit(id, ('extent', e[1] - e[0])) + else: + edit(id, ('start', e[0]), ('extent', e[1] - e[0])) + move_to(id, x0, y0) + + +def edit(id, *args): + _canvas.itemconfigure(id, **dict(args)) + + +def text(pos, color, contents, font='Helvetica', size=12, style='normal', anchor="nw"): + global _canvas_x, _canvas_y + x, y = pos + font = (font, str(size), style) + return _canvas.create_text(x, y, fill=color, text=contents, font=font, anchor=anchor) + + +def changeText(id, newText, font=None, size=12, style='normal'): + _canvas.itemconfigure(id, text=newText) + if font != None: + _canvas.itemconfigure(id, font=(font, '-%d' % size, style)) + + +def changeColor(id, newColor): + _canvas.itemconfigure(id, fill=newColor) + + +def line(here, there, color=formatColor(0, 0, 0), width=2): + x0, y0 = here[0], here[1] + x1, y1 = there[0], there[1] + return _canvas.create_line(x0, y0, x1, y1, fill=color, width=width) + +############################################################################## +### Keypress handling ######################################################## +############################################################################## + +# We bind to key-down and key-up events. + + +_keysdown = {} +_keyswaiting = {} +# This holds an unprocessed key release. We delay key releases by up to +# one call to keys_pressed() to get round a problem with auto repeat. +_got_release = None + + +def _keypress(event): + global _got_release + # remap_arrows(event) + _keysdown[event.keysym] = 1 + _keyswaiting[event.keysym] = 1 +# print event.char, event.keycode + _got_release = None + + +def _keyrelease(event): + global _got_release + # remap_arrows(event) + try: + del _keysdown[event.keysym] + except: + pass + _got_release = 1 + + +def remap_arrows(event): + # TURN ARROW PRESSES INTO LETTERS (SHOULD BE IN KEYBOARD AGENT) + if event.char in ['a', 's', 'd', 'w']: + return + if event.keycode in [37, 101]: # LEFT ARROW (win / x) + event.char = 'a' + if event.keycode in [38, 99]: # UP ARROW + event.char = 'w' + if event.keycode in [39, 102]: # RIGHT ARROW + event.char = 'd' + if event.keycode in [40, 104]: # DOWN ARROW + event.char = 's' + + +def _clear_keys(event=None): + global _keysdown, _got_release, _keyswaiting + _keysdown = {} + _keyswaiting = {} + _got_release = None + + +def keys_pressed(d_o_e=lambda arg: _root_window.dooneevent(arg), + d_w=tkinter._tkinter.DONT_WAIT): + d_o_e(d_w) + if _got_release: + d_o_e(d_w) + return list(_keysdown.keys()) + + +def keys_waiting(): + global _keyswaiting + keys = list(_keyswaiting.keys()) + _keyswaiting = {} + return keys + +# Block for a list of keys... 
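+#
+# wait_for_keys() below polls keys_pressed() until at least one key
+# symbol comes back, sleeping 50ms per iteration so Tk keeps servicing
+# events.  A typical caller (illustrative snippet, modeled on
+# animatePacman in graphicsDisplay.py):
+#
+#     keys = wait_for_keys()
+#     if 'q' in keys:
+#         self.frameTime = 0.1  # switch from single-stepping to playback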
+
+
+def wait_for_keys():
+    keys = []
+    while keys == []:
+        keys = keys_pressed()
+        sleep(0.05)
+    return keys
+
+
+def remove_from_screen(x,
+                       d_o_e=lambda arg: _root_window.dooneevent(arg),
+                       d_w=tkinter._tkinter.DONT_WAIT):
+    _canvas.delete(x)
+    d_o_e(d_w)
+
+
+def _adjust_coords(coord_list, x, y):
+    for i in range(0, len(coord_list), 2):
+        coord_list[i] = coord_list[i] + x
+        coord_list[i + 1] = coord_list[i + 1] + y
+    return coord_list
+
+
+def move_to(object, x, y=None,
+            d_o_e=lambda arg: _root_window.dooneevent(arg),
+            d_w=tkinter._tkinter.DONT_WAIT):
+    if y is None:
+        try:
+            x, y = x
+        except:
+            raise Exception('incomprehensible coordinates')
+
+    horiz = True
+    newCoords = []
+    current_x, current_y = _canvas.coords(object)[0:2]  # first point
+    for coord in _canvas.coords(object):
+        if horiz:
+            inc = x - current_x
+        else:
+            inc = y - current_y
+        horiz = not horiz
+
+        newCoords.append(coord + inc)
+
+    _canvas.coords(object, *newCoords)
+    d_o_e(d_w)
+
+
+def move_by(object, x, y=None,
+            d_o_e=lambda arg: _root_window.dooneevent(arg),
+            d_w=tkinter._tkinter.DONT_WAIT, lift=False):
+    if y is None:
+        try:
+            x, y = x
+        except:
+            raise Exception('incomprehensible coordinates')
+
+    horiz = True
+    newCoords = []
+    for coord in _canvas.coords(object):
+        if horiz:
+            inc = x
+        else:
+            inc = y
+        horiz = not horiz
+
+        newCoords.append(coord + inc)
+
+    _canvas.coords(object, *newCoords)
+    d_o_e(d_w)
+    if lift:
+        _canvas.tag_raise(object)
+
+
+def writePostscript(filename):
+    "Writes the current canvas to a postscript file."
+    psfile = open(filename, 'w')
+    psfile.write(_canvas.postscript(pageanchor='sw',
+                                    y='0.c',
+                                    x='0.c'))
+    psfile.close()
+
+
+ghost_shape = [
+    (0, - 0.5),
+    (0.25, - 0.75),
+    (0.5, - 0.5),
+    (0.75, - 0.75),
+    (0.75, 0.5),
+    (0.5, 0.75),
+    (- 0.5, 0.75),
+    (- 0.75, 0.5),
+    (- 0.75, - 0.75),
+    (- 0.5, - 0.5),
+    (- 0.25, - 0.75)
+]
+
+if __name__ == '__main__':
+    begin_graphics()
+    clear_screen()
+    ghost_shape = [(x * 10 + 20, y * 10 + 20) for x, y in ghost_shape]
+    g = polygon(ghost_shape, formatColor(1, 1, 1))
+    move_to(g, (50, 50))
+    circle((150, 150), 20, formatColor(0.7, 0.3, 0.0), endpoints=[15, - 15])
+    sleep(2)
diff --git a/multiagent/keyboardAgents.py b/multiagent/keyboardAgents.py
new file mode 100644
index 0000000..7c6d530
--- /dev/null
+++ b/multiagent/keyboardAgents.py
@@ -0,0 +1,95 @@
+# keyboardAgents.py
+# -----------------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+from game import Agent
+from game import Directions
+import random
+
+
+class KeyboardAgent(Agent):
+    """
+    An agent controlled by the keyboard.
+    """
+    # NOTE: Arrow keys also work.
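+    # The key bindings below are class attributes, which lets
+    # KeyboardAgent2 further down rebind them (IJKL plus 'u' to stop)
+    # so a second human player can share the keyboard.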
+ WEST_KEY = 'a' + EAST_KEY = 'd' + NORTH_KEY = 'w' + SOUTH_KEY = 's' + STOP_KEY = 'q' + + def __init__(self, index=0): + + self.lastMove = Directions.STOP + self.index = index + self.keys = [] + + def getAction(self, state): + from graphicsUtils import keys_waiting + from graphicsUtils import keys_pressed + keys = keys_waiting() + keys_pressed() + if keys != []: + self.keys = keys + + legal = state.getLegalActions(self.index) + move = self.getMove(legal) + + if move == Directions.STOP: + # Try to move in the same direction as before + if self.lastMove in legal: + move = self.lastMove + + if (self.STOP_KEY in self.keys) and Directions.STOP in legal: + move = Directions.STOP + + if move not in legal: + move = random.choice(legal) + + self.lastMove = move + return move + + def getMove(self, legal): + move = Directions.STOP + if (self.WEST_KEY in self.keys or 'Left' in self.keys) and Directions.WEST in legal: + move = Directions.WEST + if (self.EAST_KEY in self.keys or 'Right' in self.keys) and Directions.EAST in legal: + move = Directions.EAST + if (self.NORTH_KEY in self.keys or 'Up' in self.keys) and Directions.NORTH in legal: + move = Directions.NORTH + if (self.SOUTH_KEY in self.keys or 'Down' in self.keys) and Directions.SOUTH in legal: + move = Directions.SOUTH + return move + + +class KeyboardAgent2(KeyboardAgent): + """ + A second agent controlled by the keyboard. + """ + # NOTE: Arrow keys also work. + WEST_KEY = 'j' + EAST_KEY = "l" + NORTH_KEY = 'i' + SOUTH_KEY = 'k' + STOP_KEY = 'u' + + def getMove(self, legal): + move = Directions.STOP + if (self.WEST_KEY in self.keys) and Directions.WEST in legal: + move = Directions.WEST + if (self.EAST_KEY in self.keys) and Directions.EAST in legal: + move = Directions.EAST + if (self.NORTH_KEY in self.keys) and Directions.NORTH in legal: + move = Directions.NORTH + if (self.SOUTH_KEY in self.keys) and Directions.SOUTH in legal: + move = Directions.SOUTH + return move diff --git a/multiagent/layout.py b/multiagent/layout.py new file mode 100644 index 0000000..f317852 --- /dev/null +++ b/multiagent/layout.py @@ -0,0 +1,164 @@ +# layout.py +# --------- +# Licensing Information: You are free to use or extend these projects for +# educational purposes provided that (1) you do not distribute or publish +# solutions, (2) you retain this notice, and (3) you provide clear +# attribution to UC Berkeley, including a link to http://ai.berkeley.edu. +# +# Attribution Information: The Pacman AI projects were developed at UC Berkeley. +# The core projects and autograders were primarily created by John DeNero +# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). +# Student side autograding was added by Brad Miller, Nick Hay, and +# Pieter Abbeel (pabbeel@cs.berkeley.edu). + + +from util import manhattanDistance +from game import Grid +import os +import random +from functools import reduce + +VISIBILITY_MATRIX_CACHE = {} + + +class Layout: + """ + A Layout manages the static information about the game board. 
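+
+    Walls and food are Grid objects indexed as grid[x][y]; capsules is a
+    plain list of (x, y) positions, and once processLayoutText has run,
+    agentPositions holds (isPacman, position) pairs.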
+ """ + + def __init__(self, layoutText): + self.width = len(layoutText[0]) + self.height = len(layoutText) + self.walls = Grid(self.width, self.height, False) + self.food = Grid(self.width, self.height, False) + self.capsules = [] + self.agentPositions = [] + self.numGhosts = 0 + self.processLayoutText(layoutText) + self.layoutText = layoutText + self.totalFood = len(self.food.asList()) + # self.initializeVisibilityMatrix() + + def getNumGhosts(self): + return self.numGhosts + + def initializeVisibilityMatrix(self): + global VISIBILITY_MATRIX_CACHE + if reduce(str.__add__, self.layoutText) not in VISIBILITY_MATRIX_CACHE: + from game import Directions + vecs = [(-0.5, 0), (0.5, 0), (0, -0.5), (0, 0.5)] + dirs = [Directions.NORTH, Directions.SOUTH, + Directions.WEST, Directions.EAST] + vis = Grid(self.width, self.height, {Directions.NORTH: set(), Directions.SOUTH: set( + ), Directions.EAST: set(), Directions.WEST: set(), Directions.STOP: set()}) + for x in range(self.width): + for y in range(self.height): + if self.walls[x][y] == False: + for vec, direction in zip(vecs, dirs): + dx, dy = vec + nextx, nexty = x + dx, y + dy + while (nextx + nexty) != int(nextx) + int(nexty) or not self.walls[int(nextx)][int(nexty)]: + vis[x][y][direction].add((nextx, nexty)) + nextx, nexty = x + dx, y + dy + self.visibility = vis + VISIBILITY_MATRIX_CACHE[reduce(str.__add__, self.layoutText)] = vis + else: + self.visibility = VISIBILITY_MATRIX_CACHE[reduce( + str.__add__, self.layoutText)] + + def isWall(self, pos): + x, col = pos + return self.walls[x][col] + + def getRandomLegalPosition(self): + x = random.choice(list(range(self.width))) + y = random.choice(list(range(self.height))) + while self.isWall((x, y)): + x = random.choice(list(range(self.width))) + y = random.choice(list(range(self.height))) + return (x, y) + + def getRandomCorner(self): + poses = [(1, 1), (1, self.height - 2), (self.width - 2, 1), + (self.width - 2, self.height - 2)] + return random.choice(poses) + + def getFurthestCorner(self, pacPos): + poses = [(1, 1), (1, self.height - 2), (self.width - 2, 1), + (self.width - 2, self.height - 2)] + dist, pos = max([(manhattanDistance(p, pacPos), p) for p in poses]) + return pos + + def isVisibleFrom(self, ghostPos, pacPos, pacDirection): + row, col = [int(x) for x in pacPos] + return ghostPos in self.visibility[row][col][pacDirection] + + def __str__(self): + return "\n".join(self.layoutText) + + def deepCopy(self): + return Layout(self.layoutText[:]) + + def processLayoutText(self, layoutText): + """ + Coordinates are flipped from the input format to the (x,y) convention here + + The shape of the maze. Each character + represents a different type of object. + % - Wall + . - Food + o - Capsule + G - Ghost + P - Pacman + Other characters are ignored. 
+ """ + maxY = self.height - 1 + for y in range(self.height): + for x in range(self.width): + layoutChar = layoutText[maxY - y][x] + self.processLayoutChar(x, y, layoutChar) + self.agentPositions.sort() + self.agentPositions = [(i == 0, pos) for i, pos in self.agentPositions] + + def processLayoutChar(self, x, y, layoutChar): + if layoutChar == '%': + self.walls[x][y] = True + elif layoutChar == '.': + self.food[x][y] = True + elif layoutChar == 'o': + self.capsules.append((x, y)) + elif layoutChar == 'P': + self.agentPositions.append((0, (x, y))) + elif layoutChar in ['G']: + self.agentPositions.append((1, (x, y))) + self.numGhosts += 1 + elif layoutChar in ['1', '2', '3', '4']: + self.agentPositions.append((int(layoutChar), (x, y))) + self.numGhosts += 1 + + +def getLayout(name, back=2): + if name.endswith('.lay'): + layout = tryToLoad('layouts/' + name) + if layout == None: + layout = tryToLoad(name) + else: + layout = tryToLoad('layouts/' + name + '.lay') + if layout == None: + layout = tryToLoad(name + '.lay') + if layout == None and back >= 0: + curdir = os.path.abspath('.') + os.chdir('..') + layout = getLayout(name, back - 1) + os.chdir(curdir) + return layout + + +def tryToLoad(fullname): + if(not os.path.exists(fullname)): + return None + f = open(fullname) + try: + return Layout([line.strip() for line in f]) + finally: + f.close() diff --git a/multiagent/layouts/capsuleClassic.lay b/multiagent/layouts/capsuleClassic.lay new file mode 100644 index 0000000..83e75f4 --- /dev/null +++ b/multiagent/layouts/capsuleClassic.lay @@ -0,0 +1,7 @@ +%%%%%%%%%%%%%%%%%%% +%G. G ....% +%.% % %%%%%% %.%%.% +%.%o% % o% %.o%.% +%.%%%.% %%% %..%.% +%..... P %..%G% +%%%%%%%%%%%%%%%%%%%% diff --git a/multiagent/layouts/contestClassic.lay b/multiagent/layouts/contestClassic.lay new file mode 100644 index 0000000..0d6195e --- /dev/null +++ b/multiagent/layouts/contestClassic.lay @@ -0,0 +1,9 @@ +%%%%%%%%%%%%%%%%%%%% +%o...%........%...o% +%.%%.%.%%..%%.%.%%.% +%...... G GG%......% +%.%.%%.%% %%%.%%.%.% +%.%....% ooo%.%..%.% +%.%.%%.% %% %.%.%%.% +%o%......P....%....% +%%%%%%%%%%%%%%%%%%%% diff --git a/multiagent/layouts/mediumClassic.lay b/multiagent/layouts/mediumClassic.lay new file mode 100644 index 0000000..2ef1b3c --- /dev/null +++ b/multiagent/layouts/mediumClassic.lay @@ -0,0 +1,11 @@ +%%%%%%%%%%%%%%%%%%%% +%o...%........%....% +%.%%.%.%%%%%%.%.%%.% +%.%..............%.% +%.%.%%.%% %%.%%.%.% +%......%G G%......% +%.%.%%.%%%%%%.%%.%.% +%.%..............%.% +%.%%.%.%%%%%%.%.%%.% +%....%...P....%...o% +%%%%%%%%%%%%%%%%%%%% diff --git a/multiagent/layouts/minimaxClassic.lay b/multiagent/layouts/minimaxClassic.lay new file mode 100644 index 0000000..202c335 --- /dev/null +++ b/multiagent/layouts/minimaxClassic.lay @@ -0,0 +1,5 @@ +%%%%%%%%% +%.P G% +% %.%G%%% +%G %%% +%%%%%%%%% diff --git a/multiagent/layouts/openClassic.lay b/multiagent/layouts/openClassic.lay new file mode 100644 index 0000000..ec22729 --- /dev/null +++ b/multiagent/layouts/openClassic.lay @@ -0,0 +1,9 @@ +%%%%%%%%%%%%%%%%%%%%%%%%% +%.. P .... .... % +%.. ... ... ... ... % +%.. ... ... ... ... % +%.. .... .... G % +%.. ... ... ... ... % +%.. ... ... ... ... % +%.. .... .... 
o% +%%%%%%%%%%%%%%%%%%%%%%%%% diff --git a/multiagent/layouts/originalClassic.lay b/multiagent/layouts/originalClassic.lay new file mode 100644 index 0000000..8023758 --- /dev/null +++ b/multiagent/layouts/originalClassic.lay @@ -0,0 +1,27 @@ +%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%............%%............% +%.%%%%.%%%%%.%%.%%%%%.%%%%.% +%o%%%%.%%%%%.%%.%%%%%.%%%%o% +%.%%%%.%%%%%.%%.%%%%%.%%%%.% +%..........................% +%.%%%%.%%.%%%%%%%%.%%.%%%%.% +%.%%%%.%%.%%%%%%%%.%%.%%%%.% +%......%%....%%....%%......% +%%%%%%.%%%%% %% %%%%%.%%%%%% +%%%%%%.%%%%% %% %%%%%.%%%%%% +%%%%%%.% %.%%%%%% +%%%%%%.% %%%% %%%% %.%%%%%% +% . %G GG G% . % +%%%%%%.% %%%%%%%%%% %.%%%%%% +%%%%%%.% %.%%%%%% +%%%%%%.% %%%%%%%%%% %.%%%%%% +%............%%............% +%.%%%%.%%%%%.%%.%%%%%.%%%%.% +%.%%%%.%%%%%.%%.%%%%%.%%%%.% +%o..%%....... .......%%..o% +%%%.%%.%%.%%%%%%%%.%%.%%.%%% +%%%.%%.%%.%%%%%%%%.%%.%%.%%% +%......%%....%%....%%......% +%.%%%%%%%%%%.%%.%%%%%%%%%%.% +%.............P............% +%%%%%%%%%%%%%%%%%%%%%%%%%%%% diff --git a/multiagent/layouts/powerClassic.lay b/multiagent/layouts/powerClassic.lay new file mode 100644 index 0000000..1b87623 --- /dev/null +++ b/multiagent/layouts/powerClassic.lay @@ -0,0 +1,7 @@ +%%%%%%%%%%%%%%%%%%%% +%o....o%GGGG%o....o% +%..%...%% %%...%..% +%.%o.%........%.o%.% +%.o%.%.%%%%%%.%.%o.% +%........P.........% +%%%%%%%%%%%%%%%%%%%% diff --git a/multiagent/layouts/smallClassic.lay b/multiagent/layouts/smallClassic.lay new file mode 100644 index 0000000..5b704cb --- /dev/null +++ b/multiagent/layouts/smallClassic.lay @@ -0,0 +1,7 @@ +%%%%%%%%%%%%%%%%%%%% +%......%G G%......% +%.%%...%% %%...%%.% +%.%o.%........%.o%.% +%.%%.%.%%%%%%.%.%%.% +%........P.........% +%%%%%%%%%%%%%%%%%%%% diff --git a/multiagent/layouts/testClassic.lay b/multiagent/layouts/testClassic.lay new file mode 100644 index 0000000..f050aca --- /dev/null +++ b/multiagent/layouts/testClassic.lay @@ -0,0 +1,10 @@ +%%%%% +% . % +%.G.% +% . % +%. .% +% % +% .% +% % +%P .% +%%%%% diff --git a/multiagent/layouts/trappedClassic.lay b/multiagent/layouts/trappedClassic.lay new file mode 100644 index 0000000..6fdabd8 --- /dev/null +++ b/multiagent/layouts/trappedClassic.lay @@ -0,0 +1,5 @@ +%%%%%%%% +% P G% +%G%%%%%% +%.... % +%%%%%%%% diff --git a/multiagent/layouts/trickyClassic.lay b/multiagent/layouts/trickyClassic.lay new file mode 100644 index 0000000..2a8bec1 --- /dev/null +++ b/multiagent/layouts/trickyClassic.lay @@ -0,0 +1,13 @@ +%%%%%%%%%%%%%%%%%%%% +%o...%........%...o% +%.%%.%.%%..%%.%.%%.% +%.%.....%..%.....%.% +%.%.%%.%% %%.%%.%.% +%...... GGGG%.%....% +%.%....%%%%%%.%..%.% +%.%....% oo%.%..%.% +%.%....% %%%%.%..%.% +%.%...........%..%.% +%.%%.%.%%%%%%.%.%%.% +%o...%...P....%...o% +%%%%%%%%%%%%%%%%%%%% diff --git a/multiagent/multiAgents.py b/multiagent/multiAgents.py new file mode 100644 index 0000000..06a31a2 --- /dev/null +++ b/multiagent/multiAgents.py @@ -0,0 +1,179 @@ +# multiAgents.py +# -------------- +# Licensing Information: You are free to use or extend these projects for +# educational purposes provided that (1) you do not distribute or publish +# solutions, (2) you retain this notice, and (3) you provide clear +# attribution to UC Berkeley, including a link to http://ai.berkeley.edu. +# +# Attribution Information: The Pacman AI projects were developed at UC Berkeley. +# The core projects and autograders were primarily created by John DeNero +# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). 
+# Student side autograding was added by Brad Miller, Nick Hay, and +# Pieter Abbeel (pabbeel@cs.berkeley.edu). + + +from util import manhattanDistance +from game import Directions +import random, util + +from game import Agent +from pacman import GameState + +class ReflexAgent(Agent): + """ + A reflex agent chooses an action at each choice point by examining + its alternatives via a state evaluation function. + + The code below is provided as a guide. You are welcome to change + it in any way you see fit, so long as you don't touch our method + headers. + """ + + + def getAction(self, gameState: GameState): + """ + You do not need to change this method, but you're welcome to. + + getAction chooses among the best options according to the evaluation function. + + Just like in the previous project, getAction takes a GameState and returns + some Directions.X for some X in the set {NORTH, SOUTH, WEST, EAST, STOP} + """ + # Collect legal moves and successor states + legalMoves = gameState.getLegalActions() + + # Choose one of the best actions + scores = [self.evaluationFunction(gameState, action) for action in legalMoves] + bestScore = max(scores) + bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore] + chosenIndex = random.choice(bestIndices) # Pick randomly among the best + + "Add more of your code here if you want to" + + return legalMoves[chosenIndex] + + def evaluationFunction(self, currentGameState: GameState, action): + """ + Design a better evaluation function here. + + The evaluation function takes in the current and proposed successor + GameStates (pacman.py) and returns a number, where higher numbers are better. + + The code below extracts some useful information from the state, like the + remaining food (newFood) and Pacman position after moving (newPos). + newScaredTimes holds the number of moves that each ghost will remain + scared because of Pacman having eaten a power pellet. + + Print out these variables to see what you're getting, then combine them + to create a masterful evaluation function. + """ + # Useful information you can extract from a GameState (pacman.py) + successorGameState = currentGameState.generatePacmanSuccessor(action) + newPos = successorGameState.getPacmanPosition() + newFood = successorGameState.getFood() + newGhostStates = successorGameState.getGhostStates() + newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates] + + "*** YOUR CODE HERE ***" + return successorGameState.getScore() + +def scoreEvaluationFunction(currentGameState: GameState): + """ + This default evaluation function just returns the score of the state. + The score is the same one displayed in the Pacman GUI. + + This evaluation function is meant for use with adversarial search agents + (not reflex agents). + """ + return currentGameState.getScore() + +class MultiAgentSearchAgent(Agent): + """ + This class provides some common elements to all of your + multi-agent searchers. Any methods defined here will be available + to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent. + + You *do not* need to make any changes here, but you can if you want to + add functionality to all your adversarial search agents. Please do not + remove anything, however. + + Note: this is an abstract class: one that should not be instantiated. It's + only partially specified, and designed to be extended. Agent (game.py) + is another abstract class. 
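+
+    Note: evalFn and depth arrive as strings because agent options are
+    parsed from the command line (see pacman.parseAgentArgs, used by the
+    test harness below), which is why __init__ casts depth with int().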
+ """ + + def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'): + self.index = 0 # Pacman is always agent index 0 + self.evaluationFunction = util.lookup(evalFn, globals()) + self.depth = int(depth) + +class MinimaxAgent(MultiAgentSearchAgent): + """ + Your minimax agent (question 2) + """ + + def getAction(self, gameState: GameState): + """ + Returns the minimax action from the current gameState using self.depth + and self.evaluationFunction. + + Here are some method calls that might be useful when implementing minimax. + + gameState.getLegalActions(agentIndex): + Returns a list of legal actions for an agent + agentIndex=0 means Pacman, ghosts are >= 1 + + gameState.generateSuccessor(agentIndex, action): + Returns the successor game state after an agent takes an action + + gameState.getNumAgents(): + Returns the total number of agents in the game + + gameState.isWin(): + Returns whether or not the game state is a winning state + + gameState.isLose(): + Returns whether or not the game state is a losing state + """ + "*** YOUR CODE HERE ***" + util.raiseNotDefined() + +class AlphaBetaAgent(MultiAgentSearchAgent): + """ + Your minimax agent with alpha-beta pruning (question 3) + """ + + def getAction(self, gameState: GameState): + """ + Returns the minimax action using self.depth and self.evaluationFunction + """ + "*** YOUR CODE HERE ***" + util.raiseNotDefined() + +class ExpectimaxAgent(MultiAgentSearchAgent): + """ + Your expectimax agent (question 4) + """ + + def getAction(self, gameState: GameState): + """ + Returns the expectimax action using self.depth and self.evaluationFunction + + All ghosts should be modeled as choosing uniformly at random from their + legal moves. + """ + "*** YOUR CODE HERE ***" + util.raiseNotDefined() + +def betterEvaluationFunction(currentGameState: GameState): + """ + Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable + evaluation function (question 5). + + DESCRIPTION: + """ + "*** YOUR CODE HERE ***" + util.raiseNotDefined() + +# Abbreviation +better = betterEvaluationFunction diff --git a/multiagent/multiagentTestClasses.py b/multiagent/multiagentTestClasses.py new file mode 100644 index 0000000..4857f48 --- /dev/null +++ b/multiagent/multiagentTestClasses.py @@ -0,0 +1,578 @@ +# multiagentTestClasses.py +# ------------------------ +# Licensing Information: You are free to use or extend these projects for +# educational purposes provided that (1) you do not distribute or publish +# solutions, (2) you retain this notice, and (3) you provide clear +# attribution to UC Berkeley, including a link to http://ai.berkeley.edu. +# +# Attribution Information: The Pacman AI projects were developed at UC Berkeley. +# The core projects and autograders were primarily created by John DeNero +# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). +# Student side autograding was added by Brad Miller, Nick Hay, and +# Pieter Abbeel (pabbeel@cs.berkeley.edu). 
+ + +# A minimax tree which interfaces like gameState +# state.getNumAgents() +# state.isWin() +# state.isLose() +# state.generateSuccessor(agentIndex, action) +# state.getScore() +# used by multiAgents.scoreEvaluationFunction, which is the default +# +import testClasses +import json + +from collections import defaultdict +from pprint import PrettyPrinter +pp = PrettyPrinter() + +from game import Agent +from pacman import GameState +from ghostAgents import RandomGhost, DirectionalGhost +import random +import math +import traceback +import sys +import os +import layout +import pacman +import autograder +# import grading + +VERBOSE = False + + +class MultiagentTreeState(object): + def __init__(self, problem, state): + self.problem = problem + self.state = state + + def generateSuccessor(self, agentIndex, action): + if VERBOSE: + print("generateSuccessor(%s, %s, %s) -> %s" % (self.state, agentIndex, + action, self.problem.stateToSuccessorMap[self.state][action])) + successor = self.problem.stateToSuccessorMap[self.state][action] + self.problem.generatedStates.add(successor) + return MultiagentTreeState(self.problem, successor) + + def getScore(self): + if VERBOSE: + print("getScore(%s) -> %s" % + (self.state, self.problem.evaluation[self.state])) + if self.state not in self.problem.evaluation: + raise Exception( + 'getScore() called on non-terminal state or before maximum depth achieved.') + return float(self.problem.evaluation[self.state]) + + def getLegalActions(self, agentIndex=0): + if VERBOSE: + print("getLegalActions(%s) -> %s" % + (self.state, self.problem.stateToActions[self.state])) + # if len(self.problem.stateToActions[self.state]) == 0: + # print "WARNING: getLegalActions called on leaf state %s" % (self.state,) + return list(self.problem.stateToActions[self.state]) + + def isWin(self): + if VERBOSE: + print("isWin(%s) -> %s" % + (self.state, self.state in self.problem.winStates)) + return self.state in self.problem.winStates + + def isLose(self): + if VERBOSE: + print("isLose(%s) -> %s" % + (self.state, self.state in self.problem.loseStates)) + return self.state in self.problem.loseStates + + def getNumAgents(self): + if VERBOSE: + print("getNumAgents(%s) -> %s" % + (self.state, self.problem.numAgents)) + return self.problem.numAgents + + +class MultiagentTreeProblem(object): + def __init__(self, numAgents, startState, winStates, loseStates, successors, evaluation): + self.startState = MultiagentTreeState(self, startState) + + self.numAgents = numAgents + self.winStates = winStates + self.loseStates = loseStates + self.evaluation = evaluation + self.successors = successors + + self.reset() + + self.stateToSuccessorMap = defaultdict(dict) + self.stateToActions = defaultdict(list) + for state, action, nextState in successors: + self.stateToActions[state].append(action) + self.stateToSuccessorMap[state][action] = nextState + + def reset(self): + self.generatedStates = set([self.startState.state]) + + +def parseTreeProblem(testDict): + numAgents = int(testDict["num_agents"]) + startState = testDict["start_state"] + winStates = set(testDict["win_states"].split(" ")) + loseStates = set(testDict["lose_states"].split(" ")) + successors = [] + + evaluation = {} + for line in testDict["evaluation"].split('\n'): + tokens = line.split() + if len(tokens) == 2: + state, value = tokens + evaluation[state] = float(value) + else: + raise Exception("[parseTree] Bad evaluation line: |%s|" % (line,)) + + for line in testDict["successors"].split('\n'): + tokens = line.split() + if len(tokens) == 3: + 
state, action, nextState = tokens + successors.append((state, action, nextState)) + else: + raise Exception("[parseTree] Bad successor line: |%s|" % (line,)) + + return MultiagentTreeProblem(numAgents, startState, winStates, loseStates, successors, evaluation) + + +def run(lay, layName, pac, ghosts, disp, nGames=1, name='games'): + """ + Runs a few games and outputs their statistics. + """ + starttime = time.time() + print('*** Running %s on' % name, layName, '%d time(s).' % nGames) + games = pacman.runGames(lay, pac, ghosts, disp, + nGames, False, catchExceptions=True, timeout=120) + print('*** Finished running %s on' % name, layName, + 'after %d seconds.' % (time.time() - starttime)) + stats = {'time': time.time() - starttime, 'wins': [g.state.isWin() for g in games].count(True), 'games': games, 'scores': [g.state.getScore() for g in games], + 'timeouts': [g.agentTimeout for g in games].count(True), 'crashes': [g.agentCrashed for g in games].count(True)} + print('*** Won %d out of %d games. Average score: %f ***' % + (stats['wins'], len(games), sum(stats['scores']) * 1.0 / len(games))) + return stats + + +class GradingAgent(Agent): + def __init__(self, seed, studentAgent, optimalActions, altDepthActions, partialPlyBugActions): + # save student agent and actions of refernce agents + self.studentAgent = studentAgent + self.optimalActions = optimalActions + self.altDepthActions = altDepthActions + self.partialPlyBugActions = partialPlyBugActions + # create fields for storing specific wrong actions + self.suboptimalMoves = [] + self.wrongStatesExplored = -1 + # boolean vectors represent types of implementation the student could have + self.actionsConsistentWithOptimal = [ + True for i in range(len(optimalActions[0]))] + self.actionsConsistentWithAlternativeDepth = [ + True for i in range(len(altDepthActions[0]))] + self.actionsConsistentWithPartialPlyBug = [ + True for i in range(len(partialPlyBugActions[0]))] + # keep track of elapsed moves + self.stepCount = 0 + self.seed = seed + + def registerInitialState(self, state): + if 'registerInitialState' in dir(self.studentAgent): + self.studentAgent.registerInitialState(state) + random.seed(self.seed) + + def getAction(self, state): + GameState.getAndResetExplored() + studentAction = (self.studentAgent.getAction(state), + len(GameState.getAndResetExplored())) + optimalActions = self.optimalActions[self.stepCount] + altDepthActions = self.altDepthActions[self.stepCount] + partialPlyBugActions = self.partialPlyBugActions[self.stepCount] + studentOptimalAction = False + curRightStatesExplored = False + for i in range(len(optimalActions)): + if studentAction[0] in optimalActions[i][0]: + studentOptimalAction = True + else: + self.actionsConsistentWithOptimal[i] = False + if studentAction[1] == int(optimalActions[i][1]): + curRightStatesExplored = True + if not curRightStatesExplored and self.wrongStatesExplored < 0: + self.wrongStatesExplored = 1 + for i in range(len(altDepthActions)): + if studentAction[0] not in altDepthActions[i]: + self.actionsConsistentWithAlternativeDepth[i] = False + for i in range(len(partialPlyBugActions)): + if studentAction[0] not in partialPlyBugActions[i]: + self.actionsConsistentWithPartialPlyBug[i] = False + if not studentOptimalAction: + self.suboptimalMoves.append( + (state, studentAction[0], optimalActions[0][0][0])) + self.stepCount += 1 + random.seed(self.seed + self.stepCount) + return optimalActions[0][0][0] + + def getSuboptimalMoves(self): + return self.suboptimalMoves + + def getWrongStatesExplored(self): + 
return self.wrongStatesExplored + + def checkFailure(self): + """ + Return +n if have n suboptimal moves. + Return -1 if have only off by one depth moves. + Return 0 otherwise. + """ + if self.wrongStatesExplored > 0: + return -3 + if self.actionsConsistentWithOptimal.count(True) > 0: + return 0 + elif self.actionsConsistentWithPartialPlyBug.count(True) > 0: + return -2 + elif self.actionsConsistentWithAlternativeDepth.count(True) > 0: + return -1 + else: + return len(self.suboptimalMoves) + + +class PolyAgent(Agent): + def __init__(self, seed, multiAgents, ourPacOptions, depth): + # prepare our pacman agents + solutionAgents, alternativeDepthAgents, partialPlyBugAgents = self.construct_our_pacs( + multiAgents, ourPacOptions) + for p in solutionAgents: + p.depth = depth + for p in partialPlyBugAgents: + p.depth = depth + for p in alternativeDepthAgents[:2]: + p.depth = max(1, depth - 1) + for p in alternativeDepthAgents[2:]: + p.depth = depth + 1 + self.solutionAgents = solutionAgents + self.alternativeDepthAgents = alternativeDepthAgents + self.partialPlyBugAgents = partialPlyBugAgents + # prepare fields for storing the results + self.optimalActionLists = [] + self.alternativeDepthLists = [] + self.partialPlyBugLists = [] + self.seed = seed + self.stepCount = 0 + + def select(self, list, indices): + """ + Return a sublist of elements given by indices in list. + """ + return [list[i] for i in indices] + + def construct_our_pacs(self, multiAgents, keyword_dict): + pacs_without_stop = [multiAgents.StaffMultiAgentSearchAgent( + **keyword_dict) for i in range(3)] + keyword_dict['keepStop'] = 'True' + pacs_with_stop = [multiAgents.StaffMultiAgentSearchAgent( + **keyword_dict) for i in range(3)] + keyword_dict['usePartialPlyBug'] = 'True' + partial_ply_bug_pacs = [ + multiAgents.StaffMultiAgentSearchAgent(**keyword_dict)] + keyword_dict['keepStop'] = 'False' + partial_ply_bug_pacs = partial_ply_bug_pacs + \ + [multiAgents.StaffMultiAgentSearchAgent(**keyword_dict)] + for pac in pacs_with_stop + pacs_without_stop + partial_ply_bug_pacs: + pac.verbose = False + ourpac = [pacs_with_stop[0], pacs_without_stop[0]] + alternative_depth_pacs = self.select( + pacs_with_stop + pacs_without_stop, [1, 4, 2, 5]) + return (ourpac, alternative_depth_pacs, partial_ply_bug_pacs) + + def registerInitialState(self, state): + for agent in self.solutionAgents + self.alternativeDepthAgents: + if 'registerInitialState' in dir(agent): + agent.registerInitialState(state) + random.seed(self.seed) + + def getAction(self, state): + # survey agents + GameState.getAndResetExplored() + optimalActionLists = [] + for agent in self.solutionAgents: + optimalActionLists.append((agent.getBestPacmanActions( + state)[0], len(GameState.getAndResetExplored()))) + alternativeDepthLists = [agent.getBestPacmanActions( + state)[0] for agent in self.alternativeDepthAgents] + partialPlyBugLists = [agent.getBestPacmanActions( + state)[0] for agent in self.partialPlyBugAgents] + # record responses + self.optimalActionLists.append(optimalActionLists) + self.alternativeDepthLists.append(alternativeDepthLists) + self.partialPlyBugLists.append(partialPlyBugLists) + self.stepCount += 1 + random.seed(self.seed + self.stepCount) + return optimalActionLists[0][0][0] + + def getTraces(self): + # return traces from individual agents + return (self.optimalActionLists, self.alternativeDepthLists, self.partialPlyBugLists) + + +class PacmanGameTreeTest(testClasses.TestCase): + + def __init__(self, question, testDict): + super(PacmanGameTreeTest, 
+        self.seed = int(self.testDict['seed'])
+        self.alg = self.testDict['alg']
+        self.layout_text = self.testDict['layout']
+        self.layout_name = self.testDict['layoutName']
+        self.depth = int(self.testDict['depth'])
+        self.max_points = int(self.testDict['max_points'])
+
+    def execute(self, grades, moduleDict, solutionDict):
+        # load the student's code and the staff reference solutions
+        multiAgents = moduleDict['multiAgents']
+        studentAgent = getattr(multiAgents, self.alg)(depth=self.depth)
+        allActions = [json.loads(x)
+                      for x in solutionDict['optimalActions'].split('\n')]
+        altDepthActions = [json.loads(
+            x) for x in solutionDict['altDepthActions'].split('\n')]
+        partialPlyBugActions = [json.loads(
+            x) for x in solutionDict['partialPlyBugActions'].split('\n')]
+        # set up game state and play a game
+        random.seed(self.seed)
+        lay = layout.Layout([l.strip() for l in self.layout_text.split('\n')])
+        pac = GradingAgent(self.seed, studentAgent, allActions,
+                           altDepthActions, partialPlyBugActions)
+        # run the game, then check the return codes and assign grades
+        disp = self.question.getDisplay()
+        stats = run(lay, self.layout_name, pac, [DirectionalGhost(
+            i + 1) for i in range(2)], disp, name=self.alg)
+        if stats['timeouts'] > 0:
+            self.addMessage('Agent timed out on smallClassic. No credit')
+            return self.testFail(grades)
+        if stats['crashes'] > 0:
+            self.addMessage('Agent crashed on smallClassic. No credit')
+            return self.testFail(grades)
+        code = pac.checkFailure()
+        if code == 0:
+            return self.testPass(grades)
+        elif code == -3:
+            if pac.getWrongStatesExplored() >= 0:
+                self.addMessage('Bug: Wrong number of states expanded.')
+                return self.testFail(grades)
+            else:
+                return self.testPass(grades)
+        elif code == -2:
+            self.addMessage('Bug: Partial Ply Bug')
+            return self.testFail(grades)
+        elif code == -1:
+            self.addMessage('Bug: Search depth off by 1')
+            return self.testFail(grades)
+        elif code > 0:
+            moves = pac.getSuboptimalMoves()
+            state, studentMove, optMove = random.choice(moves)
+            self.addMessage('Bug: Suboptimal moves')
+            self.addMessage('State:%s\nStudent Move:%s\nOptimal Move:%s' % (
+                state, studentMove, optMove))
+            return self.testFail(grades)
+
+    def writeList(self, handle, name, lst):
+        handle.write('%s: """\n' % name)
+        for l in lst:
+            handle.write('%s\n' % json.dumps(l))
+        handle.write('"""\n')
+
+    def writeSolution(self, moduleDict, filePath):
+        # load the module, set the seed, create ghosts and pacman, run a game
+        multiAgents = moduleDict['multiAgents']
+        random.seed(self.seed)
+        lay = layout.Layout([l.strip() for l in self.layout_text.split('\n')])
+        if self.alg == 'ExpectimaxAgent':
+            ourPacOptions = {'expectimax': 'True'}
+        elif self.alg == 'AlphaBetaAgent':
+            ourPacOptions = {'alphabeta': 'True'}
+        else:
+            ourPacOptions = {}
+        pac = PolyAgent(self.seed, multiAgents, ourPacOptions, self.depth)
+        disp = self.question.getDisplay()
+        run(lay, self.layout_name, pac, [DirectionalGhost(
+            i + 1) for i in range(2)], disp, name=self.alg)
+        (optimalActions, altDepthActions, partialPlyBugActions) = pac.getTraces()
+        # recover the traces and record them to file
+        handle = open(filePath, 'w')
+        self.writeList(handle, 'optimalActions', optimalActions)
+        self.writeList(handle, 'altDepthActions', altDepthActions)
+        self.writeList(handle, 'partialPlyBugActions', partialPlyBugActions)
+        handle.close()
+
+
+class GraphGameTreeTest(testClasses.TestCase):
+
+    def __init__(self, question, testDict):
+        super(GraphGameTreeTest, self).__init__(question, testDict)
+        self.problem = parseTreeProblem(testDict)
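+        # self.problem is the abstract game tree the student agent will search
+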
self.alg = self.testDict['alg'] + self.diagram = self.testDict['diagram'].split('\n') + self.depth = int(self.testDict['depth']) + + def solveProblem(self, multiAgents): + self.problem.reset() + studentAgent = getattr(multiAgents, self.alg)(depth=self.depth) + action = studentAgent.getAction(self.problem.startState) + generated = self.problem.generatedStates + return action, " ".join([str(s) for s in sorted(generated)]) + + def addDiagram(self): + self.addMessage('Tree:') + for line in self.diagram: + self.addMessage(line) + + def execute(self, grades, moduleDict, solutionDict): + multiAgents = moduleDict['multiAgents'] + goldAction = solutionDict['action'] + goldGenerated = solutionDict['generated'] + action, generated = self.solveProblem(multiAgents) + + fail = False + if action != goldAction: + self.addMessage('Incorrect move for depth=%s' % (self.depth,)) + self.addMessage( + ' Student move: %s\n Optimal move: %s' % (action, goldAction)) + fail = True + + if generated != goldGenerated: + self.addMessage( + 'Incorrect generated nodes for depth=%s' % (self.depth,)) + self.addMessage(' Student generated nodes: %s\n Correct generated nodes: %s' % ( + generated, goldGenerated)) + fail = True + + if fail: + self.addDiagram() + return self.testFail(grades) + else: + return self.testPass(grades) + + def writeSolution(self, moduleDict, filePath): + multiAgents = moduleDict['multiAgents'] + action, generated = self.solveProblem(multiAgents) + with open(filePath, 'w') as handle: + handle.write('# This is the solution file for %s.\n' % self.path) + handle.write('action: "%s"\n' % (action,)) + handle.write('generated: "%s"\n' % (generated,)) + return True + + +import time +from util import TimeoutFunction + + +class EvalAgentTest(testClasses.TestCase): + + def __init__(self, question, testDict): + super(EvalAgentTest, self).__init__(question, testDict) + self.layoutName = testDict['layoutName'] + self.agentName = testDict['agentName'] + self.ghosts = eval(testDict['ghosts']) + self.maxTime = int(testDict['maxTime']) + self.seed = int(testDict['randomSeed']) + self.numGames = int(testDict['numGames']) + + self.scoreMinimum = int( + testDict['scoreMinimum']) if 'scoreMinimum' in testDict else None + self.nonTimeoutMinimum = int( + testDict['nonTimeoutMinimum']) if 'nonTimeoutMinimum' in testDict else None + self.winsMinimum = int( + testDict['winsMinimum']) if 'winsMinimum' in testDict else None + + self.scoreThresholds = [int(s) for s in testDict.get( + 'scoreThresholds', '').split()] + self.nonTimeoutThresholds = [int(s) for s in testDict.get( + 'nonTimeoutThresholds', '').split()] + self.winsThresholds = [int(s) for s in testDict.get( + 'winsThresholds', '').split()] + + self.maxPoints = sum([len(t) for t in [ + self.scoreThresholds, self.nonTimeoutThresholds, self.winsThresholds]]) + self.agentArgs = testDict.get('agentArgs', '') + + def execute(self, grades, moduleDict, solutionDict): + startTime = time.time() + + agentType = getattr(moduleDict['multiAgents'], self.agentName) + agentOpts = pacman.parseAgentArgs( + self.agentArgs) if self.agentArgs != '' else {} + agent = agentType(**agentOpts) + + lay = layout.getLayout(self.layoutName, 3) + + disp = self.question.getDisplay() + + random.seed(self.seed) + games = pacman.runGames(lay, agent, self.ghosts, disp, self.numGames, + False, catchExceptions=True, timeout=self.maxTime) + totalTime = time.time() - startTime + + stats = {'time': totalTime, 'wins': [g.state.isWin() for g in games].count(True), + 'games': games, 'scores': [g.state.getScore() 
for g in games],
+                 'timeouts': [g.agentTimeout for g in games].count(True), 'crashes': [g.agentCrashed for g in games].count(True)}
+
+        averageScore = sum(stats['scores']) / float(len(stats['scores']))
+        nonTimeouts = self.numGames - stats['timeouts']
+        wins = stats['wins']
+
+        def gradeThreshold(value, minimum, thresholds, name):
+            points = 0
+            passed = (minimum == None) or (value >= minimum)
+            if passed:
+                for t in thresholds:
+                    if value >= t:
+                        points += 1
+            return (passed, points, value, minimum, thresholds, name)
+
+        results = [gradeThreshold(averageScore, self.scoreMinimum, self.scoreThresholds, "average score"),
+                   gradeThreshold(nonTimeouts, self.nonTimeoutMinimum,
+                                  self.nonTimeoutThresholds, "games not timed out"),
+                   gradeThreshold(wins, self.winsMinimum, self.winsThresholds, "wins")]
+
+        totalPoints = 0
+        for passed, points, value, minimum, thresholds, name in results:
+            if minimum == None and len(thresholds) == 0:
+                continue
+
+            totalPoints += points
+            if not passed:
+                assert points == 0
+                self.addMessage(
+                    "%s %s (fail: below minimum value %s)" % (value, name, minimum))
+            else:
+                self.addMessage("%s %s (%s of %s points)" %
+                                (value, name, points, len(thresholds)))
+
+            if minimum != None:
+                self.addMessage("    Grading scheme:")
+                self.addMessage("     < %s:  fail" % (minimum,))
+                if len(thresholds) == 0 or minimum != thresholds[0]:
+                    self.addMessage("    >= %s:  0 points" % (minimum,))
+                for idx, threshold in enumerate(thresholds):
+                    self.addMessage("    >= %s:  %s points" %
+                                    (threshold, idx+1))
+            elif len(thresholds) > 0:
+                self.addMessage("    Grading scheme:")
+                self.addMessage("     < %s:  0 points" % (thresholds[0],))
+                for idx, threshold in enumerate(thresholds):
+                    self.addMessage("    >= %s:  %s points" %
+                                    (threshold, idx+1))
+
+        if any([not passed for passed, _, _, _, _, _ in results]):
+            totalPoints = 0
+
+        return self.testPartial(grades, totalPoints, self.maxPoints)
+
+    def writeSolution(self, moduleDict, filePath):
+        handle = open(filePath, 'w')
+        handle.write('# This is the solution file for %s.\n' % self.path)
+        handle.write('# File intentionally blank.\n')
+        handle.close()
+        return True
diff --git a/multiagent/pacman.py b/multiagent/pacman.py
new file mode 100644
index 0000000..37fe800
--- /dev/null
+++ b/multiagent/pacman.py
@@ -0,0 +1,738 @@
+# pacman.py
+# ---------
+# Licensing Information:  You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+"""
+Pacman.py holds the logic for the classic pacman game along with the main
+code to run a game. This file is divided into three sections:
+
+  (i)  Your interface to the pacman world:
+          Pacman is a complex environment. You probably don't want to
+          read through all of the code we wrote to make the game run
+          correctly. This section contains the parts of the code
+          that you will need to understand in order to complete the
+          project. There is also some code in game.py that you should
+          understand.
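+          (Agents typically only need a few GameState calls, e.g.
+          state.getLegalActions(0) and state.generateSuccessor(0, action).)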
+ + (ii) The hidden secrets of pacman: + This section contains all of the logic code that the pacman + environment uses to decide who can move where, who dies when + things collide, etc. You shouldn't need to read this section + of code, but you can if you want. + + (iii) Framework to start a game: + The final section contains the code for reading the command + you use to set up the game, then starting up a new game, along with + linking in all the external parts (agent functions, graphics). + Check this section out to see all the options available to you. + +To play your first game, type 'python pacman.py' from the command line. +The keys are 'a', 's', 'd', and 'w' to move (or arrow keys). Have fun! +""" +from game import GameStateData +from game import Game +from game import Directions +from game import Actions +from util import nearestPoint +from util import manhattanDistance +import util +import layout +import sys +import types +import time +import random +import os + +################################################### +# YOUR INTERFACE TO THE PACMAN WORLD: A GameState # +################################################### + + +class GameState: + """ + A GameState specifies the full game state, including the food, capsules, + agent configurations and score changes. + + GameStates are used by the Game object to capture the actual state of the game and + can be used by agents to reason about the game. + + Much of the information in a GameState is stored in a GameStateData object. We + strongly suggest that you access that data via the accessor methods below rather + than referring to the GameStateData object directly. + + Note that in classic Pacman, Pacman is always agent 0. + """ + + #################################################### + # Accessor methods: use these to access state data # + #################################################### + + # static variable keeps track of which states have had getLegalActions called + explored = set() + + def getAndResetExplored(): + tmp = GameState.explored.copy() + GameState.explored = set() + return tmp + getAndResetExplored = staticmethod(getAndResetExplored) + + def getLegalActions(self, agentIndex=0): + """ + Returns the legal actions for the agent specified. + """ +# GameState.explored.add(self) + if self.isWin() or self.isLose(): + return [] + + if agentIndex == 0: # Pacman is moving + return PacmanRules.getLegalActions(self) + else: + return GhostRules.getLegalActions(self, agentIndex) + + def generateSuccessor(self, agentIndex, action): + """ + Returns the successor state after the specified agent takes the action. 
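Raises an Exception if called on a terminal (win/lose) state.
+        Note that getLegalActions() returns [] on terminal states, so a
+        sketch like the following never triggers that exception for
+        Pacman (agent index 0):
+
+            successors = [state.generateSuccessor(0, a)
+                          for a in state.getLegalActions(0)]
+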
+ """ + # Check that successors exist + if self.isWin() or self.isLose(): + raise Exception('Can\'t generate a successor of a terminal state.') + + # Copy current state + state = GameState(self) + + # Let agent's logic deal with its action's effects on the board + if agentIndex == 0: # Pacman is moving + state.data._eaten = [False for i in range(state.getNumAgents())] + PacmanRules.applyAction(state, action) + else: # A ghost is moving + GhostRules.applyAction(state, action, agentIndex) + + # Time passes + if agentIndex == 0: + state.data.scoreChange += -TIME_PENALTY # Penalty for waiting around + else: + GhostRules.decrementTimer(state.data.agentStates[agentIndex]) + + # Resolve multi-agent effects + GhostRules.checkDeath(state, agentIndex) + + # Book keeping + state.data._agentMoved = agentIndex + state.data.score += state.data.scoreChange + GameState.explored.add(self) + GameState.explored.add(state) + return state + + def getLegalPacmanActions(self): + return self.getLegalActions(0) + + def generatePacmanSuccessor(self, action): + """ + Generates the successor state after the specified pacman move + """ + return self.generateSuccessor(0, action) + + def getPacmanState(self): + """ + Returns an AgentState object for pacman (in game.py) + + state.pos gives the current position + state.direction gives the travel vector + """ + return self.data.agentStates[0].copy() + + def getPacmanPosition(self): + return self.data.agentStates[0].getPosition() + + def getGhostStates(self): + return self.data.agentStates[1:] + + def getGhostState(self, agentIndex): + if agentIndex == 0 or agentIndex >= self.getNumAgents(): + raise Exception("Invalid index passed to getGhostState") + return self.data.agentStates[agentIndex] + + def getGhostPosition(self, agentIndex): + if agentIndex == 0: + raise Exception("Pacman's index passed to getGhostPosition") + return self.data.agentStates[agentIndex].getPosition() + + def getGhostPositions(self): + return [s.getPosition() for s in self.getGhostStates()] + + def getNumAgents(self): + return len(self.data.agentStates) + + def getScore(self): + return float(self.data.score) + + def getCapsules(self): + """ + Returns a list of positions (x,y) of the remaining capsules. + """ + return self.data.capsules + + def getNumFood(self): + return self.data.food.count() + + def getFood(self): + """ + Returns a Grid of boolean food indicator variables. + + Grids can be accessed via list notation, so to check + if there is food at (x,y), just call + + currentFood = state.getFood() + if currentFood[x][y] == True: ... + """ + return self.data.food + + def getWalls(self): + """ + Returns a Grid of boolean wall indicator variables. + + Grids can be accessed via list notation, so to check + if there is a wall at (x,y), just call + + walls = state.getWalls() + if walls[x][y] == True: ... + """ + return self.data.layout.walls + + def hasFood(self, x, y): + return self.data.food[x][y] + + def hasWall(self, x, y): + return self.data.layout.walls[x][y] + + def isLose(self): + return self.data._lose + + def isWin(self): + return self.data._win + + ############################################# + # Helper methods: # + # You shouldn't need to call these directly # + ############################################# + + def __init__(self, prevState=None): + """ + Generates a new state by copying information from its predecessor. 
+ """ + if prevState != None: # Initial state + self.data = GameStateData(prevState.data) + else: + self.data = GameStateData() + + def deepCopy(self): + state = GameState(self) + state.data = self.data.deepCopy() + return state + + def __eq__(self, other): + """ + Allows two states to be compared. + """ + return hasattr(other, 'data') and self.data == other.data + + def __hash__(self): + """ + Allows states to be keys of dictionaries. + """ + return hash(self.data) + + def __str__(self): + + return str(self.data) + + def initialize(self, layout, numGhostAgents=1000): + """ + Creates an initial game state from a layout array (see layout.py). + """ + self.data.initialize(layout, numGhostAgents) + +############################################################################ +# THE HIDDEN SECRETS OF PACMAN # +# # +# You shouldn't need to look through the code in this section of the file. # +############################################################################ + + +SCARED_TIME = 40 # Moves ghosts are scared +COLLISION_TOLERANCE = 0.7 # How close ghosts must be to Pacman to kill +TIME_PENALTY = 1 # Number of points lost each round + + +class ClassicGameRules: + """ + These game rules manage the control flow of a game, deciding when + and how the game starts and ends. + """ + + def __init__(self, timeout=30): + self.timeout = timeout + + def newGame(self, layout, pacmanAgent, ghostAgents, display, quiet=False, catchExceptions=False): + agents = [pacmanAgent] + ghostAgents[:layout.getNumGhosts()] + initState = GameState() + initState.initialize(layout, len(ghostAgents)) + game = Game(agents, display, self, catchExceptions=catchExceptions) + game.state = initState + self.initialState = initState.deepCopy() + self.quiet = quiet + return game + + def process(self, state, game): + """ + Checks to see whether it is time to end the game. + """ + if state.isWin(): + self.win(state, game) + if state.isLose(): + self.lose(state, game) + + def win(self, state, game): + if not self.quiet: + print("Pacman emerges victorious! Score: %d" % state.data.score) + game.gameOver = True + + def lose(self, state, game): + if not self.quiet: + print("Pacman died! Score: %d" % state.data.score) + game.gameOver = True + + def getProgress(self, game): + return float(game.state.getNumFood()) / self.initialState.getNumFood() + + def agentCrash(self, game, agentIndex): + if agentIndex == 0: + print("Pacman crashed") + else: + print("A ghost crashed") + + def getMaxTotalTime(self, agentIndex): + return self.timeout + + def getMaxStartupTime(self, agentIndex): + return self.timeout + + def getMoveWarningTime(self, agentIndex): + return self.timeout + + def getMoveTimeout(self, agentIndex): + return self.timeout + + def getMaxTimeWarnings(self, agentIndex): + return 0 + + +class PacmanRules: + """ + These functions govern how pacman interacts with his environment under + the classic game rules. + """ + PACMAN_SPEED = 1 + + def getLegalActions(state): + """ + Returns a list of possible actions. + """ + return Actions.getPossibleActions(state.getPacmanState().configuration, state.data.layout.walls) + getLegalActions = staticmethod(getLegalActions) + + def applyAction(state, action): + """ + Edits the state to reflect the results of the action. 
+ """ + legal = PacmanRules.getLegalActions(state) + if action not in legal: + raise Exception("Illegal action " + str(action)) + + pacmanState = state.data.agentStates[0] + + # Update Configuration + vector = Actions.directionToVector(action, PacmanRules.PACMAN_SPEED) + pacmanState.configuration = pacmanState.configuration.generateSuccessor( + vector) + + # Eat + next = pacmanState.configuration.getPosition() + nearest = nearestPoint(next) + if manhattanDistance(nearest, next) <= 0.5: + # Remove food + PacmanRules.consume(nearest, state) + applyAction = staticmethod(applyAction) + + def consume(position, state): + x, y = position + # Eat food + if state.data.food[x][y]: + state.data.scoreChange += 10 + state.data.food = state.data.food.copy() + state.data.food[x][y] = False + state.data._foodEaten = position + # TODO: cache numFood? + numFood = state.getNumFood() + if numFood == 0 and not state.data._lose: + state.data.scoreChange += 500 + state.data._win = True + # Eat capsule + if(position in state.getCapsules()): + state.data.capsules.remove(position) + state.data._capsuleEaten = position + # Reset all ghosts' scared timers + for index in range(1, len(state.data.agentStates)): + state.data.agentStates[index].scaredTimer = SCARED_TIME + consume = staticmethod(consume) + + +class GhostRules: + """ + These functions dictate how ghosts interact with their environment. + """ + GHOST_SPEED = 1.0 + + def getLegalActions(state, ghostIndex): + """ + Ghosts cannot stop, and cannot turn around unless they + reach a dead end, but can turn 90 degrees at intersections. + """ + conf = state.getGhostState(ghostIndex).configuration + possibleActions = Actions.getPossibleActions( + conf, state.data.layout.walls) + reverse = Actions.reverseDirection(conf.direction) + if Directions.STOP in possibleActions: + possibleActions.remove(Directions.STOP) + if reverse in possibleActions and len(possibleActions) > 1: + possibleActions.remove(reverse) + return possibleActions + getLegalActions = staticmethod(getLegalActions) + + def applyAction(state, action, ghostIndex): + + legal = GhostRules.getLegalActions(state, ghostIndex) + if action not in legal: + raise Exception("Illegal ghost action " + str(action)) + + ghostState = state.data.agentStates[ghostIndex] + speed = GhostRules.GHOST_SPEED + if ghostState.scaredTimer > 0: + speed /= 2.0 + vector = Actions.directionToVector(action, speed) + ghostState.configuration = ghostState.configuration.generateSuccessor( + vector) + applyAction = staticmethod(applyAction) + + def decrementTimer(ghostState): + timer = ghostState.scaredTimer + if timer == 1: + ghostState.configuration.pos = nearestPoint( + ghostState.configuration.pos) + ghostState.scaredTimer = max(0, timer - 1) + decrementTimer = staticmethod(decrementTimer) + + def checkDeath(state, agentIndex): + pacmanPosition = state.getPacmanPosition() + if agentIndex == 0: # Pacman just moved; Anyone can kill him + for index in range(1, len(state.data.agentStates)): + ghostState = state.data.agentStates[index] + ghostPosition = ghostState.configuration.getPosition() + if GhostRules.canKill(pacmanPosition, ghostPosition): + GhostRules.collide(state, ghostState, index) + else: + ghostState = state.data.agentStates[agentIndex] + ghostPosition = ghostState.configuration.getPosition() + if GhostRules.canKill(pacmanPosition, ghostPosition): + GhostRules.collide(state, ghostState, agentIndex) + checkDeath = staticmethod(checkDeath) + + def collide(state, ghostState, agentIndex): + if ghostState.scaredTimer > 0: + 
state.data.scoreChange += 200 + GhostRules.placeGhost(state, ghostState) + ghostState.scaredTimer = 0 + # Added for first-person + state.data._eaten[agentIndex] = True + else: + if not state.data._win: + state.data.scoreChange -= 500 + state.data._lose = True + collide = staticmethod(collide) + + def canKill(pacmanPosition, ghostPosition): + return manhattanDistance(ghostPosition, pacmanPosition) <= COLLISION_TOLERANCE + canKill = staticmethod(canKill) + + def placeGhost(state, ghostState): + ghostState.configuration = ghostState.start + placeGhost = staticmethod(placeGhost) + +############################# +# FRAMEWORK TO START A GAME # +############################# + + +def default(str): + return str + ' [Default: %default]' + + +def parseAgentArgs(str): + if str == None: + return {} + pieces = str.split(',') + opts = {} + for p in pieces: + if '=' in p: + key, val = p.split('=') + else: + key, val = p, 1 + opts[key] = val + return opts + + +def readCommand(argv): + """ + Processes the command used to run pacman from the command line. + """ + from optparse import OptionParser + usageStr = """ + USAGE: python pacman.py + EXAMPLES: (1) python pacman.py + - starts an interactive game + (2) python pacman.py --layout smallClassic --zoom 2 + OR python pacman.py -l smallClassic -z 2 + - starts an interactive game on a smaller board, zoomed in + """ + parser = OptionParser(usageStr) + + parser.add_option('-n', '--numGames', dest='numGames', type='int', + help=default('the number of GAMES to play'), metavar='GAMES', default=1) + parser.add_option('-l', '--layout', dest='layout', + help=default( + 'the LAYOUT_FILE from which to load the map layout'), + metavar='LAYOUT_FILE', default='mediumClassic') + parser.add_option('-p', '--pacman', dest='pacman', + help=default( + 'the agent TYPE in the pacmanAgents module to use'), + metavar='TYPE', default='KeyboardAgent') + parser.add_option('-t', '--textGraphics', action='store_true', dest='textGraphics', + help='Display output as text only', default=False) + parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics', + help='Generate minimal output and no graphics', default=False) + parser.add_option('-g', '--ghosts', dest='ghost', + help=default( + 'the ghost agent TYPE in the ghostAgents module to use'), + metavar='TYPE', default='RandomGhost') + parser.add_option('-k', '--numghosts', type='int', dest='numGhosts', + help=default('The maximum number of ghosts to use'), default=4) + parser.add_option('-z', '--zoom', type='float', dest='zoom', + help=default('Zoom the size of the graphics window'), default=1.0) + parser.add_option('-f', '--fixRandomSeed', action='store_true', dest='fixRandomSeed', + help='Fixes the random seed to always play the same game', default=False) + parser.add_option('-r', '--recordActions', action='store_true', dest='record', + help='Writes game histories to a file (named by the time they were played)', default=False) + parser.add_option('--replay', dest='gameToReplay', + help='A recorded game file (pickle) to replay', default=None) + parser.add_option('-a', '--agentArgs', dest='agentArgs', + help='Comma separated values sent to agent. e.g. 
"opt1=val1,opt2,opt3=val3"') + parser.add_option('-x', '--numTraining', dest='numTraining', type='int', + help=default('How many episodes are training (suppresses output)'), default=0) + parser.add_option('--frameTime', dest='frameTime', type='float', + help=default('Time to delay between frames; <0 means keyboard'), default=0.1) + parser.add_option('-c', '--catchExceptions', action='store_true', dest='catchExceptions', + help='Turns on exception handling and timeouts during games', default=False) + parser.add_option('--timeout', dest='timeout', type='int', + help=default('Maximum length of time an agent can spend computing in a single game'), default=30) + + options, otherjunk = parser.parse_args(argv) + if len(otherjunk) != 0: + raise Exception('Command line input not understood: ' + str(otherjunk)) + args = dict() + + # Fix the random seed + if options.fixRandomSeed: + random.seed('cs188') + + # Choose a layout + args['layout'] = layout.getLayout(options.layout) + if args['layout'] == None: + raise Exception("The layout " + options.layout + " cannot be found") + + # Choose a Pacman agent + noKeyboard = options.gameToReplay == None and ( + options.textGraphics or options.quietGraphics) + pacmanType = loadAgent(options.pacman, noKeyboard) + agentOpts = parseAgentArgs(options.agentArgs) + if options.numTraining > 0: + args['numTraining'] = options.numTraining + if 'numTraining' not in agentOpts: + agentOpts['numTraining'] = options.numTraining + pacman = pacmanType(**agentOpts) # Instantiate Pacman with agentArgs + args['pacman'] = pacman + + # Don't display training games + if 'numTrain' in agentOpts: + options.numQuiet = int(agentOpts['numTrain']) + options.numIgnore = int(agentOpts['numTrain']) + + # Choose a ghost agent + ghostType = loadAgent(options.ghost, noKeyboard) + args['ghosts'] = [ghostType(i+1) for i in range(options.numGhosts)] + + # Choose a display format + if options.quietGraphics: + import textDisplay + args['display'] = textDisplay.NullGraphics() + elif options.textGraphics: + import textDisplay + textDisplay.SLEEP_TIME = options.frameTime + args['display'] = textDisplay.PacmanGraphics() + else: + import graphicsDisplay + args['display'] = graphicsDisplay.PacmanGraphics( + options.zoom, frameTime=options.frameTime) + args['numGames'] = options.numGames + args['record'] = options.record + args['catchExceptions'] = options.catchExceptions + args['timeout'] = options.timeout + + # Special case: recorded games don't use the runGames method or args structure + if options.gameToReplay != None: + print('Replaying recorded game %s.' 
% options.gameToReplay)
+        import pickle
+        f = open(options.gameToReplay, 'rb')  # pickled games are binary
+        try:
+            recorded = pickle.load(f)
+        finally:
+            f.close()
+        recorded['display'] = args['display']
+        replayGame(**recorded)
+        sys.exit(0)
+
+    return args
+
+
+def loadAgent(pacman, nographics):
+    # Look through all PYTHONPATH directories for the right module.
+    pythonPathStr = os.path.expandvars("$PYTHONPATH")
+    if pythonPathStr.find(';') == -1:
+        pythonPathDirs = pythonPathStr.split(':')
+    else:
+        pythonPathDirs = pythonPathStr.split(';')
+    pythonPathDirs.append('.')
+
+    for moduleDir in pythonPathDirs:
+        if not os.path.isdir(moduleDir):
+            continue
+        moduleNames = [f for f in os.listdir(
+            moduleDir) if f.endswith('gents.py')]
+        for modulename in moduleNames:
+            try:
+                module = __import__(modulename[:-3])
+            except ImportError:
+                continue
+            if pacman in dir(module):
+                if nographics and modulename == 'keyboardAgents.py':
+                    raise Exception(
+                        'Using the keyboard requires graphics (not text display)')
+                return getattr(module, pacman)
+    raise Exception('The agent ' + pacman +
+                    ' is not specified in any *Agents.py.')
+
+
+def replayGame(layout, actions, display):
+    import pacmanAgents
+    import ghostAgents
+    rules = ClassicGameRules()
+    agents = [pacmanAgents.GreedyAgent()] + [ghostAgents.RandomGhost(i+1)
+                                             for i in range(layout.getNumGhosts())]
+    game = rules.newGame(layout, agents[0], agents[1:], display)
+    state = game.state
+    display.initialize(state.data)
+
+    for action in actions:
+        # Execute the action
+        state = state.generateSuccessor(*action)
+        # Change the display
+        display.update(state.data)
+        # Allow for game specific conditions (winning, losing, etc.)
+        rules.process(state, game)
+
+    display.finish()
+
+
+def runGames(layout, pacman, ghosts, display, numGames, record, numTraining=0, catchExceptions=False, timeout=30):
+    import __main__
+    __main__.__dict__['_display'] = display
+
+    rules = ClassicGameRules(timeout)
+    games = []
+
+    for i in range(numGames):
+        beQuiet = i < numTraining
+        if beQuiet:
+            # Suppress output and graphics
+            import textDisplay
+            gameDisplay = textDisplay.NullGraphics()
+            rules.quiet = True
+        else:
+            gameDisplay = display
+            rules.quiet = False
+        game = rules.newGame(layout, pacman, ghosts,
+                             gameDisplay, beQuiet, catchExceptions)
+        game.run()
+        if not beQuiet:
+            games.append(game)
+
+        if record:
+            import time
+            import pickle
+            fname = ('recorded-game-%d' % (i + 1)) + \
+                '-'.join([str(t) for t in time.localtime()[1:6]])
+            # 'file()' was removed in Python 3, and pickle requires a
+            # binary-mode handle
+            f = open(fname, 'wb')
+            components = {'layout': layout, 'actions': game.moveHistory}
+            pickle.dump(components, f)
+            f.close()
+
+    if (numGames-numTraining) > 0:
+        scores = [game.state.getScore() for game in games]
+        wins = [game.state.isWin() for game in games]
+        winRate = wins.count(True) / float(len(wins))
+        print('Average Score:', sum(scores) / float(len(scores)))
+        print('Scores:       ', ', '.join([str(score) for score in scores]))
+        print('Win Rate:      %d/%d (%.2f)' %
+              (wins.count(True), len(wins), winRate))
+        print('Record:       ', ', '.join(
+            [['Loss', 'Win'][int(w)] for w in wins]))
+
+    return games
+
+
+if __name__ == '__main__':
+    """
+    The main function called when pacman.py is run
+    from the command line:
+
+    > python pacman.py
+
+    See the usage string for more details.
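+
+    For example, assuming you have implemented MinimaxAgent in
+    multiAgents.py, a depth-2 game can be started with:
+
+    > python pacman.py -p MinimaxAgent -a depth=2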
+ + > python pacman.py --help + """ + args = readCommand(sys.argv[1:]) # Get game components based on input + runGames(**args) + + # import cProfile + # cProfile.run("runGames( **args )") + pass diff --git a/multiagent/pacmanAgents.py b/multiagent/pacmanAgents.py new file mode 100644 index 0000000..30e24ec --- /dev/null +++ b/multiagent/pacmanAgents.py @@ -0,0 +1,63 @@ +# pacmanAgents.py +# --------------- +# Licensing Information: You are free to use or extend these projects for +# educational purposes provided that (1) you do not distribute or publish +# solutions, (2) you retain this notice, and (3) you provide clear +# attribution to UC Berkeley, including a link to http://ai.berkeley.edu. +# +# Attribution Information: The Pacman AI projects were developed at UC Berkeley. +# The core projects and autograders were primarily created by John DeNero +# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). +# Student side autograding was added by Brad Miller, Nick Hay, and +# Pieter Abbeel (pabbeel@cs.berkeley.edu). + + +from pacman import Directions +from game import Agent +import random +import game +import util + + +class LeftTurnAgent(game.Agent): + "An agent that turns left at every opportunity" + + def getAction(self, state): + legal = state.getLegalPacmanActions() + current = state.getPacmanState().configuration.direction + if current == Directions.STOP: + current = Directions.NORTH + left = Directions.LEFT[current] + if left in legal: + return left + if current in legal: + return current + if Directions.RIGHT[current] in legal: + return Directions.RIGHT[current] + if Directions.LEFT[left] in legal: + return Directions.LEFT[left] + return Directions.STOP + + +class GreedyAgent(Agent): + def __init__(self, evalFn="scoreEvaluation"): + self.evaluationFunction = util.lookup(evalFn, globals()) + assert self.evaluationFunction != None + + def getAction(self, state): + # Generate candidate actions + legal = state.getLegalPacmanActions() + if Directions.STOP in legal: + legal.remove(Directions.STOP) + + successors = [(state.generateSuccessor(0, action), action) + for action in legal] + scored = [(self.evaluationFunction(state), action) + for state, action in successors] + bestScore = max(scored)[0] + bestActions = [pair[1] for pair in scored if pair[0] == bestScore] + return random.choice(bestActions) + + +def scoreEvaluation(state): + return state.getScore() diff --git a/multiagent/projectParams.py b/multiagent/projectParams.py new file mode 100644 index 0000000..468632c --- /dev/null +++ b/multiagent/projectParams.py @@ -0,0 +1,18 @@ +# projectParams.py +# ---------------- +# Licensing Information: You are free to use or extend these projects for +# educational purposes provided that (1) you do not distribute or publish +# solutions, (2) you retain this notice, and (3) you provide clear +# attribution to UC Berkeley, including a link to http://ai.berkeley.edu. +# +# Attribution Information: The Pacman AI projects were developed at UC Berkeley. +# The core projects and autograders were primarily created by John DeNero +# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). +# Student side autograding was added by Brad Miller, Nick Hay, and +# Pieter Abbeel (pabbeel@cs.berkeley.edu). 
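+
+# Project-wide settings read by autograder.py: the module students edit,
+# the module defining this project's test classes, and the display name.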
+STUDENT_CODE_DEFAULT = 'multiAgents.py'
+PROJECT_TEST_CLASSES = 'multiagentTestClasses.py'
+PROJECT_NAME = 'Project 2: Multiagent search'
+BONUS_PIC = False
diff --git a/multiagent/testClasses.py b/multiagent/testClasses.py
new file mode 100644
index 0000000..22cb5ad
--- /dev/null
+++ b/multiagent/testClasses.py
@@ -0,0 +1,212 @@
+# testClasses.py
+# --------------
+# Licensing Information:  You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+# import modules from the python standard library
+import inspect
+import re
+import sys
+import time
+
+# Class which models a question in a project. Note that questions have a
+# maximum number of points they are worth, and are composed of a series of
+# test cases.
+class Question(object):
+
+    def raiseNotDefined(self):
+        print('Method not implemented: %s' % inspect.stack()[1][3])
+        sys.exit(1)
+
+    def __init__(self, questionDict, display):
+        self.maxPoints = int(questionDict['max_points'])
+        self.testCases = []
+        self.display = display
+
+    def getDisplay(self):
+        return self.display
+
+    def getMaxPoints(self):
+        return self.maxPoints
+
+    # Note that 'thunk' must be a function which accepts a single argument,
+    # namely a 'grading' object
+    def addTestCase(self, testCase, thunk):
+        self.testCases.append((testCase, thunk))
+
+    def execute(self, grades):
+        self.raiseNotDefined()
+
+# Question in which all test cases must be passed in order to receive credit
+class PassAllTestsQuestion(Question):
+
+    def execute(self, grades):
+        # TODO: is this the right way to use grades? The autograder doesn't seem to use it.
+        testsFailed = False
+        grades.assignZeroCredit()
+        for _, f in self.testCases:
+            start = time.time()
+            if not f(grades):
+                testsFailed = True
+            end = time.time()
+            if end-start > 60:
+                print("WARNING: this question took over 60 seconds to run. This may end up timing out the gradescope autograder")
+        if testsFailed:
+            grades.fail("Tests failed.")
+        else:
+            grades.assignFullCredit()
+
+
+class ExtraCreditPassAllTestsQuestion(Question):
+    def __init__(self, questionDict, display):
+        Question.__init__(self, questionDict, display)
+        self.extraPoints = int(questionDict['extra_points'])
+
+    def execute(self, grades):
+        # TODO: is this the right way to use grades? The autograder doesn't seem to use it.
+        testsFailed = False
+        grades.assignZeroCredit()
+        for _, f in self.testCases:
+            if not f(grades):
+                testsFailed = True
+        if testsFailed:
+            grades.fail("Tests failed.")
+        else:
+            grades.assignFullCredit()
+            grades.addPoints(self.extraPoints)
+
+# Question in which partial credit is given for test cases with a 'points'
+# property. All other tests are mandatory and must be passed.
+class HackedPartialCreditQuestion(Question):
+
+    def execute(self, grades):
+        # TODO: is this the right way to use grades? The autograder doesn't seem to use it.
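+        # sum points from tests that declare a 'points' property; all
+        # other tests are mandatory pass/fail gates for the question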
+        grades.assignZeroCredit()
+
+        points = 0
+        passed = True
+        for testCase, f in self.testCases:
+            testResult = f(grades)
+            if "points" in testCase.testDict:
+                if testResult:
+                    points += float(testCase.testDict["points"])
+            else:
+                passed = passed and testResult
+
+        # FIXME: Below is a terrible hack to match q3's logic
+        if int(points) == self.maxPoints and not passed:
+            grades.assignZeroCredit()
+        else:
+            grades.addPoints(int(points))
+
+
+class Q6PartialCreditQuestion(Question):
+    """Fails any test which returns False, otherwise doesn't affect the grades object.
+    Partial credit tests will add the required points."""
+
+    def execute(self, grades):
+        grades.assignZeroCredit()
+
+        results = []
+        for _, f in self.testCases:
+            results.append(f(grades))
+        if False in results:
+            grades.assignZeroCredit()
+
+
+class PartialCreditQuestion(Question):
+    """Fails any test which returns False, otherwise doesn't affect the grades object.
+    Partial credit tests will add the required points."""
+
+    def execute(self, grades):
+        grades.assignZeroCredit()
+
+        for _, f in self.testCases:
+            start = time.time()
+            if not f(grades):
+                grades.assignZeroCredit()
+                grades.fail("Tests failed.")
+                return False
+            end = time.time()
+            if end-start > 300:
+                print("WARNING: this question took over 5 minutes to run. This may end up timing out the gradescope autograder")
+
+
+class NumberPassedQuestion(Question):
+    """Grade is the number of test cases passed."""
+
+    def execute(self, grades):
+        grades.addPoints([f(grades) for _, f in self.testCases].count(True))
+
+
+# Template modeling a generic test case
+class TestCase(object):
+
+    def raiseNotDefined(self):
+        print('Method not implemented: %s' % inspect.stack()[1][3])
+        sys.exit(1)
+
+    def getPath(self):
+        return self.path
+
+    def __init__(self, question, testDict):
+        self.question = question
+        self.testDict = testDict
+        self.path = testDict['path']
+        self.messages = []
+
+    def __str__(self):
+        self.raiseNotDefined()
+
+    def execute(self, grades, moduleDict, solutionDict):
+        self.raiseNotDefined()
+
+    def writeSolution(self, moduleDict, filePath):
+        self.raiseNotDefined()
+        return True
+
+    # Tests should call the following methods for grading
+    # to ensure a uniform format for test output.
+    #
+    # TODO: this is hairy, but we need to fix grading.py's interface
+    # to get a nice hierarchical project - question - test structure,
+    # then these should be moved into Question proper.
+    def testPass(self, grades):
+        grades.addMessage('PASS: %s' % (self.path,))
+        for line in self.messages:
+            grades.addMessage('    %s' % (line,))
+        return True
+
+    def testFail(self, grades):
+        grades.addMessage('FAIL: %s' % (self.path,))
+        for line in self.messages:
+            grades.addMessage('    %s' % (line,))
+        return False
+
+    # This should really be question level?
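+    # Awards the given points toward maxPoints; any points beyond
+    # maxPoints are reported separately as extra credit.
+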
+ def testPartial(self, grades, points, maxPoints): + grades.addPoints(points) + extraCredit = max(0, points - maxPoints) + regularCredit = points - extraCredit + + grades.addMessage('%s: %s (%s of %s points)' % ( + "PASS" if points >= maxPoints else "FAIL", self.path, regularCredit, maxPoints)) + if extraCredit > 0: + grades.addMessage('EXTRA CREDIT: %s points' % (extraCredit,)) + + for line in self.messages: + grades.addMessage(' %s' % (line,)) + + return True + + def addMessage(self, message): + self.messages.extend(message.split('\n')) diff --git a/multiagent/testParser.py b/multiagent/testParser.py new file mode 100644 index 0000000..1d720e7 --- /dev/null +++ b/multiagent/testParser.py @@ -0,0 +1,86 @@ +# testParser.py +# ------------- +# Licensing Information: You are free to use or extend these projects for +# educational purposes provided that (1) you do not distribute or publish +# solutions, (2) you retain this notice, and (3) you provide clear +# attribution to UC Berkeley, including a link to http://ai.berkeley.edu. +# +# Attribution Information: The Pacman AI projects were developed at UC Berkeley. +# The core projects and autograders were primarily created by John DeNero +# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). +# Student side autograding was added by Brad Miller, Nick Hay, and +# Pieter Abbeel (pabbeel@cs.berkeley.edu). + + +import re +import sys + + +class TestParser(object): + + def __init__(self, path): + # save the path to the test file + self.path = path + + def removeComments(self, rawlines): + # remove any portion of a line following a '#' symbol + fixed_lines = [] + for l in rawlines: + idx = l.find('#') + if idx == -1: + fixed_lines.append(l) + else: + fixed_lines.append(l[0:idx]) + return '\n'.join(fixed_lines) + + def parse(self): + # read in the test case and remove comments + test = {} + with open(self.path) as handle: + raw_lines = handle.read().split('\n') + + test_text = self.removeComments(raw_lines) + test['__raw_lines__'] = raw_lines + test['path'] = self.path + test['__emit__'] = [] + lines = test_text.split('\n') + i = 0 + # read a property in each loop cycle + while(i < len(lines)): + # skip blank lines + if re.match(r'\A\s*\Z', lines[i]): + test['__emit__'].append(("raw", raw_lines[i])) + i += 1 + continue + m = re.match(r'\A([^"]*?):\s*"([^"]*)"\s*\Z', lines[i]) + if m: + test[m.group(1)] = m.group(2) + test['__emit__'].append(("oneline", m.group(1))) + i += 1 + continue + m = re.match(r'\A([^"]*?):\s*"""\s*\Z', lines[i]) + if m: + msg = [] + i += 1 + while(not re.match(r'\A\s*"""\s*\Z', lines[i])): + msg.append(raw_lines[i]) + i += 1 + test[m.group(1)] = '\n'.join(msg) + test['__emit__'].append(("multiline", m.group(1))) + i += 1 + continue + print('error parsing test file: %s' % self.path) + sys.exit(1) + return test + + +def emitTestDict(testDict, handle): + for kind, data in testDict['__emit__']: + if kind == "raw": + handle.write(data + "\n") + elif kind == "oneline": + handle.write('%s: "%s"\n' % (data, testDict[data])) + elif kind == "multiline": + handle.write('%s: """\n%s\n"""\n' % (data, testDict[data])) + else: + raise Exception("Bad __emit__") diff --git a/multiagent/test_cases/CONFIG b/multiagent/test_cases/CONFIG new file mode 100644 index 0000000..dc6ad04 --- /dev/null +++ b/multiagent/test_cases/CONFIG @@ -0,0 +1 @@ +order: "q1 q2 q3 q4 q5" diff --git a/multiagent/test_cases/extra/CONFIG b/multiagent/test_cases/extra/CONFIG new file mode 100644 index 0000000..76c89af --- /dev/null +++ 
b/multiagent/test_cases/extra/CONFIG @@ -0,0 +1,2 @@ +max_points: "0" +class: "PartialCreditQuestion" diff --git a/multiagent/test_cases/extra/grade-agent.test b/multiagent/test_cases/extra/grade-agent.test new file mode 100644 index 0000000..3d8f123 --- /dev/null +++ b/multiagent/test_cases/extra/grade-agent.test @@ -0,0 +1,11 @@ +class: "EvalAgentTest" + +agentName: "ContestAgent" +layoutName: "contestClassic" +maxTime: "180" +numGames: "5" + +scoreThresholds: "2500 2900" + +randomSeed: "0" +ghosts: "[DirectionalGhost(1), DirectionalGhost(2), DirectionalGhost(3)]" diff --git a/multiagent/test_cases/q1/CONFIG b/multiagent/test_cases/q1/CONFIG new file mode 100644 index 0000000..506820a --- /dev/null +++ b/multiagent/test_cases/q1/CONFIG @@ -0,0 +1,2 @@ +max_points: "4" +class: "PartialCreditQuestion" diff --git a/multiagent/test_cases/q1/grade-agent.solution b/multiagent/test_cases/q1/grade-agent.solution new file mode 100644 index 0000000..c5b1889 --- /dev/null +++ b/multiagent/test_cases/q1/grade-agent.solution @@ -0,0 +1,2 @@ +# This is the solution file for test_cases/q1/grade-agent.test. +# File intentionally blank. diff --git a/multiagent/test_cases/q1/grade-agent.test b/multiagent/test_cases/q1/grade-agent.test new file mode 100644 index 0000000..bdd7fe4 --- /dev/null +++ b/multiagent/test_cases/q1/grade-agent.test @@ -0,0 +1,18 @@ +class: "EvalAgentTest" + +agentName: "ReflexAgent" +layoutName: "openClassic" +maxTime: "120" +numGames: "10" + + +nonTimeoutMinimum: "10" + +scoreThresholds: "500 1000" + +winsMinimum: "1" +winsThresholds: "5 10" + + +randomSeed: "0" +ghosts: "[RandomGhost(1)]" diff --git a/multiagent/test_cases/q2/0-eval-function-lose-states-1.solution b/multiagent/test_cases/q2/0-eval-function-lose-states-1.solution new file mode 100644 index 0000000..f0088bb --- /dev/null +++ b/multiagent/test_cases/q2/0-eval-function-lose-states-1.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/0-eval-function-lose-states-1.test. +action: "Left" +generated: "lose1 lose2 root" diff --git a/multiagent/test_cases/q2/0-eval-function-lose-states-1.test b/multiagent/test_cases/q2/0-eval-function-lose-states-1.test new file mode 100644 index 0000000..974f0cc --- /dev/null +++ b/multiagent/test_cases/q2/0-eval-function-lose-states-1.test @@ -0,0 +1,30 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "2" + +diagram: """ + root + / \ + lose1 lose2 + 1 0 + +If your algorithm is returning a different +action, make sure you are calling the +evaluation function on losing states. +""" +num_agents: "2" + +start_state: "root" +win_states: "" +lose_states: "lose1 lose2" + +successors: """ +root Left lose1 +root Right lose2 +""" + +evaluation: """ +lose1 1.0 +lose2 0.0 +""" + diff --git a/multiagent/test_cases/q2/0-eval-function-lose-states-2.solution b/multiagent/test_cases/q2/0-eval-function-lose-states-2.solution new file mode 100644 index 0000000..e0a44f7 --- /dev/null +++ b/multiagent/test_cases/q2/0-eval-function-lose-states-2.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/0-eval-function-lose-states-2.test. 
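# 'action' is the optimal root move; 'generated' lists the states the
+# agent expanded, space-separated and sorted.
+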
+action: "Right" +generated: "lose1 lose2 root" diff --git a/multiagent/test_cases/q2/0-eval-function-lose-states-2.test b/multiagent/test_cases/q2/0-eval-function-lose-states-2.test new file mode 100644 index 0000000..b608129 --- /dev/null +++ b/multiagent/test_cases/q2/0-eval-function-lose-states-2.test @@ -0,0 +1,30 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "2" + +diagram: """ + root + / \ + lose1 lose2 + 0 1 + +If your algorithm is returning a different +action, make sure you are calling the +evaluation function on losing states. +""" +num_agents: "2" + +start_state: "root" +win_states: "" +lose_states: "lose1 lose2" + +successors: """ +root Left lose1 +root Right lose2 +""" + +evaluation: """ +lose1 0.0 +lose2 1.0 +""" + diff --git a/multiagent/test_cases/q2/0-eval-function-win-states-1.solution b/multiagent/test_cases/q2/0-eval-function-win-states-1.solution new file mode 100644 index 0000000..e731cbb --- /dev/null +++ b/multiagent/test_cases/q2/0-eval-function-win-states-1.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/0-eval-function-win-states-1.test. +action: "Left" +generated: "root win1 win2" diff --git a/multiagent/test_cases/q2/0-eval-function-win-states-1.test b/multiagent/test_cases/q2/0-eval-function-win-states-1.test new file mode 100644 index 0000000..0fd3c23 --- /dev/null +++ b/multiagent/test_cases/q2/0-eval-function-win-states-1.test @@ -0,0 +1,30 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "2" + +diagram: """ + root + / \ + win1 win2 + 1 0 + +If your algorithm is returning a different +action, make sure you are calling the +evaluation function on winning states. +""" +num_agents: "2" + +start_state: "root" +win_states: "win1 win2" +lose_states: "" + +successors: """ +root Left win1 +root Right win2 +""" + +evaluation: """ +win1 1.0 +win2 0.0 +""" + diff --git a/multiagent/test_cases/q2/0-eval-function-win-states-2.solution b/multiagent/test_cases/q2/0-eval-function-win-states-2.solution new file mode 100644 index 0000000..1dbcd4c --- /dev/null +++ b/multiagent/test_cases/q2/0-eval-function-win-states-2.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/0-eval-function-win-states-2.test. +action: "Right" +generated: "root win1 win2" diff --git a/multiagent/test_cases/q2/0-eval-function-win-states-2.test b/multiagent/test_cases/q2/0-eval-function-win-states-2.test new file mode 100644 index 0000000..4f8369c --- /dev/null +++ b/multiagent/test_cases/q2/0-eval-function-win-states-2.test @@ -0,0 +1,30 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "2" + +diagram: """ + root + / \ + win1 win2 + 0 1 + +If your algorithm is returning a different +action, make sure you are calling the +evaluation function on winning states. +""" +num_agents: "2" + +start_state: "root" +win_states: "win1 win2" +lose_states: "" + +successors: """ +root Left win1 +root Right win2 +""" + +evaluation: """ +win1 0.0 +win2 1.0 +""" + diff --git a/multiagent/test_cases/q2/0-lecture-6-tree.solution b/multiagent/test_cases/q2/0-lecture-6-tree.solution new file mode 100644 index 0000000..1e0834d --- /dev/null +++ b/multiagent/test_cases/q2/0-lecture-6-tree.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/0-lecture-6-tree.test. 
+action: "Center" +generated: "A B C D E F G H I max min1 min2 min3" diff --git a/multiagent/test_cases/q2/0-lecture-6-tree.test b/multiagent/test_cases/q2/0-lecture-6-tree.test new file mode 100644 index 0000000..6d516a0 --- /dev/null +++ b/multiagent/test_cases/q2/0-lecture-6-tree.test @@ -0,0 +1,50 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "2" + +# Tree from lecture 6 slides +diagram: """ + max + /-/ | \--\ + / | \ + / | \ + min1 min2 min3 + /|\ /|\ /|\ + / | \ / | \ / | \ +A B C D E F G H I +3 12 8 5 4 6 14 1 11 +""" + +num_agents: "2" + +start_state: "max" +win_states: "A B C D E F G H I" +lose_states: "" + +successors: """ +max Left min1 +max Center min2 +max Right min3 +min1 Left A +min1 Center B +min1 Right C +min2 Left D +min2 Center E +min2 Right F +min3 Left G +min3 Center H +min3 Right I +""" + + +evaluation: """ +A 3.0 +B 12.0 +C 8.0 +D 5.0 +E 4.0 +F 6.0 +G 14.0 +H 1.0 +I 11.0 +""" diff --git a/multiagent/test_cases/q2/0-small-tree.solution b/multiagent/test_cases/q2/0-small-tree.solution new file mode 100644 index 0000000..275257d --- /dev/null +++ b/multiagent/test_cases/q2/0-small-tree.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/0-small-tree.test. +action: "pacLeft" +generated: "A B C D deeper minLeft minRight root" diff --git a/multiagent/test_cases/q2/0-small-tree.test b/multiagent/test_cases/q2/0-small-tree.test new file mode 100644 index 0000000..5ab2926 --- /dev/null +++ b/multiagent/test_cases/q2/0-small-tree.test @@ -0,0 +1,36 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "3" + +diagram: """ + root + / \ + minLeft minRight + / \ / \ + A B C deeper + 4 3 2 | + D + 1000 +""" +num_agents: "2" + +start_state: "root" +win_states: "A C" +lose_states: "B D" + +successors: """ +root pacLeft minLeft +root pacRight minRight +minLeft gLeft A +minLeft gRight B +minRight gLeft C +minRight gRight deeper +deeper pacLeft D +""" + +evaluation: """ +A 4.0 +B 3.0 +C 2.0 +D 1000.0 +""" diff --git a/multiagent/test_cases/q2/1-1-minmax.solution b/multiagent/test_cases/q2/1-1-minmax.solution new file mode 100644 index 0000000..92838e9 --- /dev/null +++ b/multiagent/test_cases/q2/1-1-minmax.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/1-1-minmax.test. +action: "Left" +generated: "a b1 b2 c1 c2 cx d1 d2 d3 d4 dx" diff --git a/multiagent/test_cases/q2/1-1-minmax.test b/multiagent/test_cases/q2/1-1-minmax.test new file mode 100644 index 0000000..2f7c288 --- /dev/null +++ b/multiagent/test_cases/q2/1-1-minmax.test @@ -0,0 +1,47 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "3" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ | + c1 c2 cx + / \ / \ | + d1 d2 d3 d4 dx +-3 -9 10 6 -3.01 + +a - max +b - min +c - max + +Note that the minimax value of b1 is -3. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Down cx +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +cx Down dx +""" + +evaluation: """ +d1 -3.0 +d2 -9.0 +d3 10.0 +d4 6.0 +dx -3.01 +""" diff --git a/multiagent/test_cases/q2/1-2-minmax.solution b/multiagent/test_cases/q2/1-2-minmax.solution new file mode 100644 index 0000000..166571a --- /dev/null +++ b/multiagent/test_cases/q2/1-2-minmax.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/1-2-minmax.test. 
+action: "Right" +generated: "a b1 b2 c1 c2 cx d1 d2 d3 d4 dx" diff --git a/multiagent/test_cases/q2/1-2-minmax.test b/multiagent/test_cases/q2/1-2-minmax.test new file mode 100644 index 0000000..67afad8 --- /dev/null +++ b/multiagent/test_cases/q2/1-2-minmax.test @@ -0,0 +1,47 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "3" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ | + c1 c2 cx + / \ / \ | + d1 d2 d3 d4 dx +-3 -9 10 6 -2.99 + +a - max +b - min +c - max + +Note that the minimax value of b1 is -3. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Down cx +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +cx Down dx +""" + +evaluation: """ +d1 -3.0 +d2 -9.0 +d3 10.0 +d4 6.0 +dx -2.99 +""" diff --git a/multiagent/test_cases/q2/1-3-minmax.solution b/multiagent/test_cases/q2/1-3-minmax.solution new file mode 100644 index 0000000..0de3ad3 --- /dev/null +++ b/multiagent/test_cases/q2/1-3-minmax.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/1-3-minmax.test. +action: "Left" +generated: "a b1 b2 c3 c4 cx d5 d6 d7 d8 dx" diff --git a/multiagent/test_cases/q2/1-3-minmax.test b/multiagent/test_cases/q2/1-3-minmax.test new file mode 100644 index 0000000..85aa14c --- /dev/null +++ b/multiagent/test_cases/q2/1-3-minmax.test @@ -0,0 +1,47 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "3" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + | / \ + cx c3 c4 + | / \ / \ + dx d5 d6 d7 d8 + 4.01 4 -7 0 5 + +a - max +b - min +c - max + +Note that the minimax value of b2 is 4. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Down cx +b2 Left c3 +b2 Right c4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +""" + +evaluation: """ +d5 4.0 +d6 -7.0 +d7 0.0 +d8 5.0 +dx 4.01 +""" diff --git a/multiagent/test_cases/q2/1-4-minmax.solution b/multiagent/test_cases/q2/1-4-minmax.solution new file mode 100644 index 0000000..03ccec4 --- /dev/null +++ b/multiagent/test_cases/q2/1-4-minmax.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/1-4-minmax.test. +action: "Right" +generated: "a b1 b2 c3 c4 cx d5 d6 d7 d8 dx" diff --git a/multiagent/test_cases/q2/1-4-minmax.test b/multiagent/test_cases/q2/1-4-minmax.test new file mode 100644 index 0000000..16cbeee --- /dev/null +++ b/multiagent/test_cases/q2/1-4-minmax.test @@ -0,0 +1,47 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "3" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + | / \ + cx c3 c4 + | / \ / \ + dx d5 d6 d7 d8 + 3.99 4 -7 0 5 + +a - max +b - min +c - max + +Note that the minimax value of b2 is 4. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Down cx +b2 Left c3 +b2 Right c4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +""" + +evaluation: """ +d5 4.0 +d6 -7.0 +d7 0.0 +d8 5.0 +dx 3.99 +""" diff --git a/multiagent/test_cases/q2/1-5-minmax.solution b/multiagent/test_cases/q2/1-5-minmax.solution new file mode 100644 index 0000000..cdb2622 --- /dev/null +++ b/multiagent/test_cases/q2/1-5-minmax.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/1-5-minmax.test. 
+action: "Right" +generated: "A B C D E F G H Z a b1 b2 c1 c2 cx d1 d2 d3 d4 dx" diff --git a/multiagent/test_cases/q2/1-5-minmax.test b/multiagent/test_cases/q2/1-5-minmax.test new file mode 100644 index 0000000..e9785a0 --- /dev/null +++ b/multiagent/test_cases/q2/1-5-minmax.test @@ -0,0 +1,75 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "4" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ | + c1 c2 cx + / \ / \ | + d1 d2 d3 d4 dx + / \ / \ / \ / \ | + A B C D E F G H Z +-3 13 5 9 10 3 -6 8 3.01 + +a - max +b - min +c - max +d - min + +Note the minimax value of b1 is 3. +""" +num_agents: "2" + +start_state: "a" +win_states: "A B C D E F G H I J K L M N O P Z" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Down cx +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +d1 Left A +d1 Right B +d2 Left C +d2 Right D +d3 Left E +d3 Right F +d4 Left G +d4 Right H +d5 Left I +d5 Right J +d6 Left K +d6 Right L +d7 Left M +d7 Right N +d8 Left O +d8 Right P +dx Down Z +""" + +evaluation: """ +A -3.0 +B 13.0 +C 5.0 +D 9.0 +E 10.0 +F 3.0 +G -6.0 +H 8.0 +Z 3.01 +""" diff --git a/multiagent/test_cases/q2/1-6-minmax.solution b/multiagent/test_cases/q2/1-6-minmax.solution new file mode 100644 index 0000000..77e6960 --- /dev/null +++ b/multiagent/test_cases/q2/1-6-minmax.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/1-6-minmax.test. +action: "Left" +generated: "A B C D E F G H Z a b1 b2 c1 c2 cx d1 d2 d3 d4 dx" diff --git a/multiagent/test_cases/q2/1-6-minmax.test b/multiagent/test_cases/q2/1-6-minmax.test new file mode 100644 index 0000000..2d25946 --- /dev/null +++ b/multiagent/test_cases/q2/1-6-minmax.test @@ -0,0 +1,75 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "4" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ | + c1 c2 cx + / \ / \ | + d1 d2 d3 d4 dx + / \ / \ / \ / \ | + A B C D E F G H Z +-3 13 5 9 10 3 -6 8 2.99 + +a - max +b - min +c - max +d - min + +Note the minimax value of b1 is 3. +""" +num_agents: "2" + +start_state: "a" +win_states: "A B C D E F G H I J K L M N O P Z" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Down cx +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +d1 Left A +d1 Right B +d2 Left C +d2 Right D +d3 Left E +d3 Right F +d4 Left G +d4 Right H +d5 Left I +d5 Right J +d6 Left K +d6 Right L +d7 Left M +d7 Right N +d8 Left O +d8 Right P +dx Down Z +""" + +evaluation: """ +A -3.0 +B 13.0 +C 5.0 +D 9.0 +E 10.0 +F 3.0 +G -6.0 +H 8.0 +Z 2.99 +""" diff --git a/multiagent/test_cases/q2/1-7-minmax.solution b/multiagent/test_cases/q2/1-7-minmax.solution new file mode 100644 index 0000000..e47628d --- /dev/null +++ b/multiagent/test_cases/q2/1-7-minmax.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/1-7-minmax.test. 
+action: "Left" +generated: "I J K L M N O P Z a b1 b2 c3 c4 cx d5 d6 d7 d8 dx" diff --git a/multiagent/test_cases/q2/1-7-minmax.test b/multiagent/test_cases/q2/1-7-minmax.test new file mode 100644 index 0000000..7f402b7 --- /dev/null +++ b/multiagent/test_cases/q2/1-7-minmax.test @@ -0,0 +1,75 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "4" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + | / \ + cx c3 c4 + | / \ / \ + dx d5 d6 d7 d8 + | / \ / \ / \ / \ + Z I J K L M N O P + -1.99 -1 -9 4 7 2 5 -3 -2 + +a - max +b - min +c - min +d - max + +Note that the minimax value of b2 is -2 +""" +num_agents: "3" + +start_state: "a" +win_states: "A B C D E F G H I J K L M N O P Z" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Down cx +b2 Left c3 +b2 Right c4 +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +d1 Left A +d1 Right B +d2 Left C +d2 Right D +d3 Left E +d3 Right F +d4 Left G +d4 Right H +d5 Left I +d5 Right J +d6 Left K +d6 Right L +d7 Left M +d7 Right N +d8 Left O +d8 Right P +dx Down Z +""" + +evaluation: """ +I -1.0 +J -9.0 +K 4.0 +L 7.0 +M 2.0 +N 5.0 +O -3.0 +P -2.0 +Z -1.99 +""" diff --git a/multiagent/test_cases/q2/1-8-minmax.solution b/multiagent/test_cases/q2/1-8-minmax.solution new file mode 100644 index 0000000..e0357d4 --- /dev/null +++ b/multiagent/test_cases/q2/1-8-minmax.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/1-8-minmax.test. +action: "Right" +generated: "I J K L M N O P Z a b1 b2 c3 c4 cx d5 d6 d7 d8 dx" diff --git a/multiagent/test_cases/q2/1-8-minmax.test b/multiagent/test_cases/q2/1-8-minmax.test new file mode 100644 index 0000000..d3eb8e6 --- /dev/null +++ b/multiagent/test_cases/q2/1-8-minmax.test @@ -0,0 +1,75 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "4" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + | / \ + cx c3 c4 + | / \ / \ + dx d5 d6 d7 d8 + | / \ / \ / \ / \ + Z I J K L M N O P + -2.01 -1 -9 4 7 2 5 -3 -2 + +a - max +b - min +c - min +d - max + +Note that the minimax value of b2 is -2.01 +""" +num_agents: "3" + +start_state: "a" +win_states: "A B C D E F G H I J K L M N O P Z" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Down cx +b2 Left c3 +b2 Right c4 +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +d1 Left A +d1 Right B +d2 Left C +d2 Right D +d3 Left E +d3 Right F +d4 Left G +d4 Right H +d5 Left I +d5 Right J +d6 Left K +d6 Right L +d7 Left M +d7 Right N +d8 Left O +d8 Right P +dx Down Z +""" + +evaluation: """ +I -1.0 +J -9.0 +K 4.0 +L 7.0 +M 2.0 +N 5.0 +O -3.0 +P -2.0 +Z -2.01 +""" diff --git a/multiagent/test_cases/q2/2-1a-vary-depth.solution b/multiagent/test_cases/q2/2-1a-vary-depth.solution new file mode 100644 index 0000000..ef813a0 --- /dev/null +++ b/multiagent/test_cases/q2/2-1a-vary-depth.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/2-1a-vary-depth.test. 
+action: "Left" +generated: "a b1 b2 c1 c2 cx" diff --git a/multiagent/test_cases/q2/2-1a-vary-depth.test b/multiagent/test_cases/q2/2-1a-vary-depth.test new file mode 100644 index 0000000..5265ae1 --- /dev/null +++ b/multiagent/test_cases/q2/2-1a-vary-depth.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "1" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ | +-4 c1 c2 9 cx -4.01 + / \ / \ | + d1 d2 d3 d4 dx +-3 -9 10 6 -4.01 + +a - max +b - min +c - max + +Note that the minimax value of b1 is -3, but the depth=1 limited value is -4. +The values next to c1, c2, and cx are the values of the evaluation function, not +necessarily the correct minimax backup. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Down cx +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +cx Down dx +""" + +evaluation: """ +c1 -4.0 +c2 9.0 +cx -4.01 +d1 -3.0 +d2 -9.0 +d3 10.0 +d4 6.0 +dx -4.01 +""" diff --git a/multiagent/test_cases/q2/2-1b-vary-depth.solution b/multiagent/test_cases/q2/2-1b-vary-depth.solution new file mode 100644 index 0000000..b9636df --- /dev/null +++ b/multiagent/test_cases/q2/2-1b-vary-depth.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/2-1b-vary-depth.test. +action: "Left" +generated: "a b1 b2 c1 c2 cx d1 d2 d3 d4 dx" diff --git a/multiagent/test_cases/q2/2-1b-vary-depth.test b/multiagent/test_cases/q2/2-1b-vary-depth.test new file mode 100644 index 0000000..8d0434e --- /dev/null +++ b/multiagent/test_cases/q2/2-1b-vary-depth.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "2" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ | +-4 c1 c2 9 cx -4.01 + / \ / \ | + d1 d2 d3 d4 dx +-3 -9 10 6 -4.01 + +a - max +b - min +c - max + +Note that the minimax value of b1 is -3, but the depth=1 limited value is -4. +The values next to c1, c2, and cx are the values of the evaluation function, not +necessarily the correct minimax backup. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Down cx +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +cx Down dx +""" + +evaluation: """ +c1 -4.0 +c2 9.0 +cx -4.01 +d1 -3.0 +d2 -9.0 +d3 10.0 +d4 6.0 +dx -4.01 +""" diff --git a/multiagent/test_cases/q2/2-2a-vary-depth.solution b/multiagent/test_cases/q2/2-2a-vary-depth.solution new file mode 100644 index 0000000..75eab88 --- /dev/null +++ b/multiagent/test_cases/q2/2-2a-vary-depth.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/2-2a-vary-depth.test. +action: "Right" +generated: "a b1 b2 c1 c2 cx" diff --git a/multiagent/test_cases/q2/2-2a-vary-depth.test b/multiagent/test_cases/q2/2-2a-vary-depth.test new file mode 100644 index 0000000..c6ba62d --- /dev/null +++ b/multiagent/test_cases/q2/2-2a-vary-depth.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "1" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ | +-4 c1 c2 9 cx -3.99 + / \ / \ | + d1 d2 d3 d4 dx +-3 -9 10 6 -3.99 + +a - max +b - min +c - max + +Note that the minimax value of b1 is -3, but the depth=1 limited value is -4. +The values next to c1, c2, and cx are the values of the evaluation function, not +necessarily the correct minimax backup. 
+""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Down cx +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +cx Down dx +""" + +evaluation: """ +c1 -4.0 +c2 9.0 +cx -3.99 +d1 -3.0 +d2 -9.0 +d3 10.0 +d4 6.0 +dx -3.99 +""" diff --git a/multiagent/test_cases/q2/2-2b-vary-depth.solution b/multiagent/test_cases/q2/2-2b-vary-depth.solution new file mode 100644 index 0000000..831182c --- /dev/null +++ b/multiagent/test_cases/q2/2-2b-vary-depth.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/2-2b-vary-depth.test. +action: "Left" +generated: "a b1 b2 c1 c2 cx d1 d2 d3 d4 dx" diff --git a/multiagent/test_cases/q2/2-2b-vary-depth.test b/multiagent/test_cases/q2/2-2b-vary-depth.test new file mode 100644 index 0000000..ea58c39 --- /dev/null +++ b/multiagent/test_cases/q2/2-2b-vary-depth.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "2" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ | +-4 c1 c2 9 cx -3.99 + / \ / \ | + d1 d2 d3 d4 dx +-3 -9 10 6 -3.99 + +a - max +b - min +c - max + +Note that the minimax value of b1 is -3, but the depth=1 limited value is -4. +The values next to c1, c2, and cx are the values of the evaluation function, not +necessarily the correct minimax backup. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Down cx +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +cx Down dx +""" + +evaluation: """ +c1 -4.0 +c2 9.0 +cx -3.99 +d1 -3.0 +d2 -9.0 +d3 10.0 +d4 6.0 +dx -3.99 +""" diff --git a/multiagent/test_cases/q2/2-3a-vary-depth.solution b/multiagent/test_cases/q2/2-3a-vary-depth.solution new file mode 100644 index 0000000..bbf705c --- /dev/null +++ b/multiagent/test_cases/q2/2-3a-vary-depth.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/2-3a-vary-depth.test. +action: "Left" +generated: "a b1 b2 c3 c4 cx" diff --git a/multiagent/test_cases/q2/2-3a-vary-depth.test b/multiagent/test_cases/q2/2-3a-vary-depth.test new file mode 100644 index 0000000..362cbbc --- /dev/null +++ b/multiagent/test_cases/q2/2-3a-vary-depth.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "1" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + | / \ + 5.01 cx 8 c3 c4 5 + | / \ / \ + dx d5 d6 d7 d8 + 5.01 4 -7 0 5 + +a - max +b - min +c - max + +Note that the minimax value of b1 is 4, but the depth=1 limited value is 5. +The values next to c3, c4, and cx are the values of the evaluation function, not +necessarily the correct minimax backup. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Down cx +b2 Left c3 +b2 Right c4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +""" + +evaluation: """ +c3 8.0 +c4 5.0 +cx 5.01 +d5 4.0 +d6 -7.0 +d7 0.0 +d8 5.0 +dx 5.01 +""" diff --git a/multiagent/test_cases/q2/2-3b-vary-depth.solution b/multiagent/test_cases/q2/2-3b-vary-depth.solution new file mode 100644 index 0000000..936bb80 --- /dev/null +++ b/multiagent/test_cases/q2/2-3b-vary-depth.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/2-3b-vary-depth.test. 
+action: "Left" +generated: "a b1 b2 c3 c4 cx d5 d6 d7 d8 dx" diff --git a/multiagent/test_cases/q2/2-3b-vary-depth.test b/multiagent/test_cases/q2/2-3b-vary-depth.test new file mode 100644 index 0000000..e532bc7 --- /dev/null +++ b/multiagent/test_cases/q2/2-3b-vary-depth.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "2" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + | / \ + 5.01 cx 8 c3 c4 5 + | / \ / \ + dx d5 d6 d7 d8 + 5.01 4 -7 0 5 + +a - max +b - min +c - max + +Note that the minimax value of b1 is 4, but the depth=1 limited value is 5. +The values next to c3, c4, and cx are the values of the evaluation function, not +necessarily the correct minimax backup. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Down cx +b2 Left c3 +b2 Right c4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +""" + +evaluation: """ +c3 8.0 +c4 5.0 +cx 5.01 +d5 4.0 +d6 -7.0 +d7 0.0 +d8 5.0 +dx 5.01 +""" diff --git a/multiagent/test_cases/q2/2-4a-vary-depth.solution b/multiagent/test_cases/q2/2-4a-vary-depth.solution new file mode 100644 index 0000000..1161dfc --- /dev/null +++ b/multiagent/test_cases/q2/2-4a-vary-depth.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/2-4a-vary-depth.test. +action: "Right" +generated: "a b1 b2 c3 c4 cx" diff --git a/multiagent/test_cases/q2/2-4a-vary-depth.test b/multiagent/test_cases/q2/2-4a-vary-depth.test new file mode 100644 index 0000000..534631c --- /dev/null +++ b/multiagent/test_cases/q2/2-4a-vary-depth.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "1" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + | / \ + 4.99 cx 8 c3 c4 5 + | / \ / \ + dx d5 d6 d7 d8 + 4.99 4 -7 0 5 + +a - max +b - min +c - max + +Note that the minimax value of b1 is 4, but the depth=1 limited value is 5. +The values next to c3, c4, and cx are the values of the evaluation function, not +necessarily the correct minimax backup. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Down cx +b2 Left c3 +b2 Right c4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +""" + +evaluation: """ +c3 8.0 +c4 5.0 +cx 4.99 +d5 4.0 +d6 -7.0 +d7 0.0 +d8 5.0 +dx 4.99 +""" diff --git a/multiagent/test_cases/q2/2-4b-vary-depth.solution b/multiagent/test_cases/q2/2-4b-vary-depth.solution new file mode 100644 index 0000000..c5d91d7 --- /dev/null +++ b/multiagent/test_cases/q2/2-4b-vary-depth.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/2-4b-vary-depth.test. +action: "Left" +generated: "a b1 b2 c3 c4 cx d5 d6 d7 d8 dx" diff --git a/multiagent/test_cases/q2/2-4b-vary-depth.test b/multiagent/test_cases/q2/2-4b-vary-depth.test new file mode 100644 index 0000000..c884af4 --- /dev/null +++ b/multiagent/test_cases/q2/2-4b-vary-depth.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "2" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + | / \ + 4.99 cx 8 c3 c4 5 + | / \ / \ + dx d5 d6 d7 d8 + 4.99 4 -7 0 5 + +a - max +b - min +c - max + +Note that the minimax value of b1 is 4, but the depth=1 limited value is 5. +The values next to c3, c4, and cx are the values of the evaluation function, not +necessarily the correct minimax backup. 
+""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Down cx +b2 Left c3 +b2 Right c4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +""" + +evaluation: """ +c3 8.0 +c4 5.0 +cx 4.99 +d5 4.0 +d6 -7.0 +d7 0.0 +d8 5.0 +dx 4.99 +""" diff --git a/multiagent/test_cases/q2/2-one-ghost-3level.solution b/multiagent/test_cases/q2/2-one-ghost-3level.solution new file mode 100644 index 0000000..5d87269 --- /dev/null +++ b/multiagent/test_cases/q2/2-one-ghost-3level.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/2-one-ghost-3level.test. +action: "Left" +generated: "a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7 d8" diff --git a/multiagent/test_cases/q2/2-one-ghost-3level.test b/multiagent/test_cases/q2/2-one-ghost-3level.test new file mode 100644 index 0000000..37fc5c6 --- /dev/null +++ b/multiagent/test_cases/q2/2-one-ghost-3level.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "3" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ / \ + c1 c2 c3 c4 + / \ / \ / \ / \ + d1 d2 d3 d4 d5 d6 d7 d8 + 3 9 10 6 4 7 0 5 + +a - max +b - min +c - max +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Left c3 +b2 Right c4 +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +""" + +evaluation: """ +d1 3.0 +d2 9.0 +d3 10.0 +d4 6.0 +d5 4.0 +d6 7.0 +d7 0.0 +d8 5.0 +""" diff --git a/multiagent/test_cases/q2/3-one-ghost-4level.solution b/multiagent/test_cases/q2/3-one-ghost-4level.solution new file mode 100644 index 0000000..b4af8a5 --- /dev/null +++ b/multiagent/test_cases/q2/3-one-ghost-4level.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/3-one-ghost-4level.test. +action: "Left" +generated: "A B C D E F G H I J K L M N O P a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7 d8" diff --git a/multiagent/test_cases/q2/3-one-ghost-4level.test b/multiagent/test_cases/q2/3-one-ghost-4level.test new file mode 100644 index 0000000..a487e4c --- /dev/null +++ b/multiagent/test_cases/q2/3-one-ghost-4level.test @@ -0,0 +1,79 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "4" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ / \ + c1 c2 c3 c4 + / \ / \ / \ / \ + d1 d2 d3 d4 d5 d6 d7 d8 +/ \ / \ / \ / \ / \ / \ / \ / \ +A B C D E F G H I J K L M N O P +3 13 5 9 10 11 6 8 1 0 4 7 12 15 2 14 + +a - max +b - min +c - max +d - min +""" +num_agents: "2" + +start_state: "a" +win_states: "A B C D E F G H I J K L M N O P" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Left c3 +b2 Right c4 +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +d1 Left A +d1 Right B +d2 Left C +d2 Right D +d3 Left E +d3 Right F +d4 Left G +d4 Right H +d5 Left I +d5 Right J +d6 Left K +d6 Right L +d7 Left M +d7 Right N +d8 Left O +d8 Right P +""" + +evaluation: """ +A 3.0 +B 13.0 +C 5.0 +D 9.0 +E 10.0 +F 11.0 +G 6.0 +H 8.0 +I 1.0 +J 0.0 +K 4.0 +L 7.0 +M 12.0 +N 15.0 +O 2.0 +P 14.0 +""" diff --git a/multiagent/test_cases/q2/4-two-ghosts-3level.solution b/multiagent/test_cases/q2/4-two-ghosts-3level.solution new file mode 100644 index 0000000..45000cd --- /dev/null +++ b/multiagent/test_cases/q2/4-two-ghosts-3level.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/4-two-ghosts-3level.test. 
+action: "Left" +generated: "a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7 d8" diff --git a/multiagent/test_cases/q2/4-two-ghosts-3level.test b/multiagent/test_cases/q2/4-two-ghosts-3level.test new file mode 100644 index 0000000..712ff27 --- /dev/null +++ b/multiagent/test_cases/q2/4-two-ghosts-3level.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "3" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ / \ + c1 c2 c3 c4 + / \ / \ / \ / \ + d1 d2 d3 d4 d5 d6 d7 d8 + 3 9 10 6 4 7 0 5 + +a - max +b - min +c - min +""" +num_agents: "3" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Left c3 +b2 Right c4 +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +""" + +evaluation: """ +d1 3.0 +d2 9.0 +d3 10.0 +d4 6.0 +d5 4.0 +d6 7.0 +d7 0.0 +d8 5.0 +""" diff --git a/multiagent/test_cases/q2/5-two-ghosts-4level.solution b/multiagent/test_cases/q2/5-two-ghosts-4level.solution new file mode 100644 index 0000000..12c0a9b --- /dev/null +++ b/multiagent/test_cases/q2/5-two-ghosts-4level.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/5-two-ghosts-4level.test. +action: "Left" +generated: "A B C D E F G H I J K L M N O P a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7 d8" diff --git a/multiagent/test_cases/q2/5-two-ghosts-4level.test b/multiagent/test_cases/q2/5-two-ghosts-4level.test new file mode 100644 index 0000000..d1badd2 --- /dev/null +++ b/multiagent/test_cases/q2/5-two-ghosts-4level.test @@ -0,0 +1,79 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "4" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ / \ + c1 c2 c3 c4 + / \ / \ / \ / \ + d1 d2 d3 d4 d5 d6 d7 d8 +/ \ / \ / \ / \ / \ / \ / \ / \ +A B C D E F G H I J K L M N O P +3 13 5 9 10 11 6 8 1 0 4 7 12 15 2 14 + +a - max +b - min +c - min +d - max +""" +num_agents: "3" + +start_state: "a" +win_states: "A B C D E F G H I J K L M N O P" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Left c3 +b2 Right c4 +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +d1 Left A +d1 Right B +d2 Left C +d2 Right D +d3 Left E +d3 Right F +d4 Left G +d4 Right H +d5 Left I +d5 Right J +d6 Left K +d6 Right L +d7 Left M +d7 Right N +d8 Left O +d8 Right P +""" + +evaluation: """ +A 3.0 +B 13.0 +C 5.0 +D 9.0 +E 10.0 +F 11.0 +G 6.0 +H 8.0 +I 1.0 +J 0.0 +K 4.0 +L 7.0 +M 12.0 +N 15.0 +O 2.0 +P 14.0 +""" diff --git a/multiagent/test_cases/q2/6-tied-root.solution b/multiagent/test_cases/q2/6-tied-root.solution new file mode 100644 index 0000000..b93d188 --- /dev/null +++ b/multiagent/test_cases/q2/6-tied-root.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/6-tied-root.test. 
+action: "Left" +generated: "A B C max min1 min2" diff --git a/multiagent/test_cases/q2/6-tied-root.test b/multiagent/test_cases/q2/6-tied-root.test new file mode 100644 index 0000000..2bd0cbe --- /dev/null +++ b/multiagent/test_cases/q2/6-tied-root.test @@ -0,0 +1,31 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "3" + +diagram: """ + max + / \ +min1 min2 + | / \ + A B C +10 10 0 +""" +num_agents: "2" + +start_state: "max" +win_states: "A B" +lose_states: "C" + +successors: """ +max Left min1 +max Right min2 +min1 Down A +min2 Left B +min2 Right C +""" + +evaluation: """ +A 10.0 +B 10.0 +C 0.0 +""" diff --git a/multiagent/test_cases/q2/7-1a-check-depth-one-ghost.solution b/multiagent/test_cases/q2/7-1a-check-depth-one-ghost.solution new file mode 100644 index 0000000..51149f9 --- /dev/null +++ b/multiagent/test_cases/q2/7-1a-check-depth-one-ghost.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/7-1a-check-depth-one-ghost.test. +action: "Left" +generated: "a b1 b2 b3 c1 c2 c3" diff --git a/multiagent/test_cases/q2/7-1a-check-depth-one-ghost.test b/multiagent/test_cases/q2/7-1a-check-depth-one-ghost.test new file mode 100644 index 0000000..627e66b --- /dev/null +++ b/multiagent/test_cases/q2/7-1a-check-depth-one-ghost.test @@ -0,0 +1,83 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "1" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 10 c1 0 c2 c3 8 + | | | + 0 d1 0 d2 d3 8 + | | | + 0 e1 10 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + g1 g2 g3 + 0 0 8 + +a - max +b - min +c - max +d - min +e - max +f - min + +At depth 1, the evaluation function is called at level c, +so Left should be returned. If your algorithm is returning a +different action, check how you implemented your depth. +""" + +num_agents: "2" + +start_state: "a" +win_states: "g1 g2 g3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 10.0 +c2 0.0 +c3 8.0 +d1 0.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 10.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 0.0 +g3 8.0 +""" + + diff --git a/multiagent/test_cases/q2/7-1b-check-depth-one-ghost.solution b/multiagent/test_cases/q2/7-1b-check-depth-one-ghost.solution new file mode 100644 index 0000000..994371e --- /dev/null +++ b/multiagent/test_cases/q2/7-1b-check-depth-one-ghost.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/7-1b-check-depth-one-ghost.test. +action: "Center" +generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3" diff --git a/multiagent/test_cases/q2/7-1b-check-depth-one-ghost.test b/multiagent/test_cases/q2/7-1b-check-depth-one-ghost.test new file mode 100644 index 0000000..3c3e3ea --- /dev/null +++ b/multiagent/test_cases/q2/7-1b-check-depth-one-ghost.test @@ -0,0 +1,83 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "2" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 10 c1 0 c2 c3 8 + | | | + 0 d1 0 d2 d3 8 + | | | + 0 e1 10 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + g1 g2 g3 + 0 0 8 + +a - max +b - min +c - max +d - min +e - max +f - min + +At depth 2, the evaluation function is called at level e, +so Center should be returned. If your algorithm is returning a +different action, check how you implemented your depth. 
+""" + +num_agents: "2" + +start_state: "a" +win_states: "g1 g2 g3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 10.0 +c2 0.0 +c3 8.0 +d1 0.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 10.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 0.0 +g3 8.0 +""" + + diff --git a/multiagent/test_cases/q2/7-1c-check-depth-one-ghost.solution b/multiagent/test_cases/q2/7-1c-check-depth-one-ghost.solution new file mode 100644 index 0000000..5027245 --- /dev/null +++ b/multiagent/test_cases/q2/7-1c-check-depth-one-ghost.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/7-1c-check-depth-one-ghost.test. +action: "Right" +generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3" diff --git a/multiagent/test_cases/q2/7-1c-check-depth-one-ghost.test b/multiagent/test_cases/q2/7-1c-check-depth-one-ghost.test new file mode 100644 index 0000000..b7d6551 --- /dev/null +++ b/multiagent/test_cases/q2/7-1c-check-depth-one-ghost.test @@ -0,0 +1,83 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "3" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 10 c1 0 c2 c3 8 + | | | + 0 d1 0 d2 d3 8 + | | | + 0 e1 10 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + g1 g2 g3 + 0 0 8 + +a - max +b - min +c - max +d - min +e - max +f - min + +At depth 3, the evaluation function is called at level g, +so Right should be returned. If your algorithm is returning a +different action, check how you implemented your depth. +""" + +num_agents: "2" + +start_state: "a" +win_states: "g1 g2 g3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 10.0 +c2 0.0 +c3 8.0 +d1 0.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 10.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 0.0 +g3 8.0 +""" + + diff --git a/multiagent/test_cases/q2/7-2a-check-depth-two-ghosts.solution b/multiagent/test_cases/q2/7-2a-check-depth-two-ghosts.solution new file mode 100644 index 0000000..b3e0ed2 --- /dev/null +++ b/multiagent/test_cases/q2/7-2a-check-depth-two-ghosts.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/7-2a-check-depth-two-ghosts.test. +action: "Left" +generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3" diff --git a/multiagent/test_cases/q2/7-2a-check-depth-two-ghosts.test b/multiagent/test_cases/q2/7-2a-check-depth-two-ghosts.test new file mode 100644 index 0000000..94c966a --- /dev/null +++ b/multiagent/test_cases/q2/7-2a-check-depth-two-ghosts.test @@ -0,0 +1,110 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "1" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 0 c1 0 c2 c3 8 + | | | + 10 d1 0 d2 d3 8 + | | | + 0 e1 0 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + 0 g1 10 g2 g3 8 + | | | + 0 h1 0 h2 h3 8 + | | | + 0 i1 0 i2 i3 8 + | | | + j1 j2 j3 + 0 0 8 + +a - max +b - min +c - min +d - max +e - min +f - min +g - max +h - min +i - min + +At depth 1, the evaluation function is called at level d, +so Left should be returned. If your algorithm is returning a +different action, check how you implemented your depth. 
+""" + +num_agents: "3" + +start_state: "a" +win_states: "j1 j2 j3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +g1 Center h1 +g2 Center h2 +g3 Center h3 +h1 Center i1 +h2 Center i2 +h3 Center i3 +i1 Center j1 +i2 Center j2 +i3 Center j3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 0.0 +c2 0.0 +c3 8.0 +d1 10.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 0.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 10.0 +g3 8.0 +h1 0.0 +h2 0.0 +h3 8.0 +i1 0.0 +i2 0.0 +i3 8.0 +j1 0.0 +j2 0.0 +j3 8.0 +""" + + diff --git a/multiagent/test_cases/q2/7-2b-check-depth-two-ghosts.solution b/multiagent/test_cases/q2/7-2b-check-depth-two-ghosts.solution new file mode 100644 index 0000000..e737e05 --- /dev/null +++ b/multiagent/test_cases/q2/7-2b-check-depth-two-ghosts.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/7-2b-check-depth-two-ghosts.test. +action: "Center" +generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3" diff --git a/multiagent/test_cases/q2/7-2b-check-depth-two-ghosts.test b/multiagent/test_cases/q2/7-2b-check-depth-two-ghosts.test new file mode 100644 index 0000000..dbaab5a --- /dev/null +++ b/multiagent/test_cases/q2/7-2b-check-depth-two-ghosts.test @@ -0,0 +1,110 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "2" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 0 c1 0 c2 c3 8 + | | | + 10 d1 0 d2 d3 8 + | | | + 0 e1 0 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + 0 g1 10 g2 g3 8 + | | | + 0 h1 0 h2 h3 8 + | | | + 0 i1 0 i2 i3 8 + | | | + j1 j2 j3 + 0 0 8 + +a - max +b - min +c - min +d - max +e - min +f - min +g - max +h - min +i - min + +At depth 2, the evaluation function is called at level g, +so Center should be returned. If your algorithm is returning +a different action, check how you implemented your depth. +""" + +num_agents: "3" + +start_state: "a" +win_states: "j1 j2 j3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +g1 Center h1 +g2 Center h2 +g3 Center h3 +h1 Center i1 +h2 Center i2 +h3 Center i3 +i1 Center j1 +i2 Center j2 +i3 Center j3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 0.0 +c2 0.0 +c3 8.0 +d1 10.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 0.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 10.0 +g3 8.0 +h1 0.0 +h2 0.0 +h3 8.0 +i1 0.0 +i2 0.0 +i3 8.0 +j1 0.0 +j2 0.0 +j3 8.0 +""" + + diff --git a/multiagent/test_cases/q2/7-2c-check-depth-two-ghosts.solution b/multiagent/test_cases/q2/7-2c-check-depth-two-ghosts.solution new file mode 100644 index 0000000..6d42f5c --- /dev/null +++ b/multiagent/test_cases/q2/7-2c-check-depth-two-ghosts.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q2/7-2c-check-depth-two-ghosts.test. 
+action: "Right" +generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3 h1 h2 h3 i1 i2 i3 j1 j2 j3" diff --git a/multiagent/test_cases/q2/7-2c-check-depth-two-ghosts.test b/multiagent/test_cases/q2/7-2c-check-depth-two-ghosts.test new file mode 100644 index 0000000..49612aa --- /dev/null +++ b/multiagent/test_cases/q2/7-2c-check-depth-two-ghosts.test @@ -0,0 +1,110 @@ +class: "GraphGameTreeTest" +alg: "MinimaxAgent" +depth: "3" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 0 c1 0 c2 c3 8 + | | | + 10 d1 0 d2 d3 8 + | | | + 0 e1 0 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + 0 g1 10 g2 g3 8 + | | | + 0 h1 0 h2 h3 8 + | | | + 0 i1 0 i2 i3 8 + | | | + j1 j2 j3 + 0 0 8 + +a - max +b - min +c - min +d - max +e - min +f - min +g - max +h - min +i - min + +At depth 3, the evaluation function is called at level j, +so Right should be returned. If your algorithm is returning +a different action, check how you implemented your depth. +""" + +num_agents: "3" + +start_state: "a" +win_states: "j1 j2 j3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +g1 Center h1 +g2 Center h2 +g3 Center h3 +h1 Center i1 +h2 Center i2 +h3 Center i3 +i1 Center j1 +i2 Center j2 +i3 Center j3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 0.0 +c2 0.0 +c3 8.0 +d1 10.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 0.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 10.0 +g3 8.0 +h1 0.0 +h2 0.0 +h3 8.0 +i1 0.0 +i2 0.0 +i3 8.0 +j1 0.0 +j2 0.0 +j3 8.0 +""" + + diff --git a/multiagent/test_cases/q2/8-pacman-game.solution b/multiagent/test_cases/q2/8-pacman-game.solution new file mode 100644 index 0000000..dcc1184 --- /dev/null +++ b/multiagent/test_cases/q2/8-pacman-game.solution @@ -0,0 +1,444 @@ +optimalActions: """ +[[["West", "East"], 59], [["West", "East"], 35]] +[[["West"], 190], [["West"], 127]] +[[["West"], 190], [["West"], 135]] +[[["West", "North"], 120], [["West", "North"], 82]] +[[["West"], 77], [["West"], 57]] +[[["West", "North"], 143], [["West", "North"], 97]] +[[["West"], 155], [["West"], 110]] +[[["West"], 40], [["West"], 27]] +[[["North"], 64], [["North"], 43]] +[[["North"], 85], [["North"], 57]] +[[["North"], 106], [["North"], 71]] +[[["North"], 97], [["North"], 65]] +[[["Stop", "East"], 154], [["East"], 103]] +[[["East"], 156], [["East"], 101]] +[[["West"], 30], [["West"], 17]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 18], [["East"], 12]] +[[["North"], 29], [["North"], 18]] +[[["North"], 50], [["North"], 31]] +[[["West"], 55], [["West"], 36]] +[[["East"], 29], [["East"], 16]] +[[["North"], 89], [["North"], 61]] +[[["East", "North"], 161], [["East", "North"], 121]] +[[["East", "North"], 221], [["East", "North"], 166]] +[[["North", "South"], 105], [["North", "South"], 77]] +[[["West"], 69], [["West"], 51]] +[[["West"], 94], [["West"], 69]] +[[["West", "Stop"], 57], [["West"], 42]] +[[["West", "Stop", "East"], 69], [["West", "East"], 49]] +[[["West", "Stop", "East"], 61], [["West", "East"], 41]] +[[["Stop", "East", "South"], 55], [["East", "South"], 37]] +[[["Stop", "East", "South"], 28], [["East", "South"], 19]] +[[["Stop", "East", "South"], 34], [["East", "South"], 23]] +[[["Stop", 
"East", "South"], 55], [["East", "South"], 37]] +[[["Stop", "East", "South"], 55], [["East", "South"], 37]] +[[["Stop", "East", "South"], 61], [["East", "South"], 41]] +[[["Stop", "East", "South"], 85], [["East", "South"], 57]] +[[["Stop", "East", "South"], 64], [["East", "South"], 43]] +[[["Stop", "East", "South"], 61], [["East", "South"], 41]] +[[["Stop", "East", "South"], 61], [["East", "South"], 41]] +[[["Stop", "East", "South"], 85], [["East", "South"], 57]] +[[["Stop", "East", "South"], 102], [["East", "South"], 67]] +[[["Stop", "South"], 23], [["South"], 13]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 18], [["East"], 12]] +[[["East", "North"], 29], [["East", "North"], 18]] +[[["East"], 38], [["East"], 22]] +[[["North"], 29], [["North"], 18]] +[[["North"], 38], [["North"], 22]] +[[["East"], 33], [["East"], 22]] +[[["East"], 37], [["East"], 18]] +[[["East"], 18], [["East"], 12]] +[[["East"], 37], [["East"], 26]] +[[["East"], 69], [["East"], 41]] +[[["East"], 56], [["East"], 26]] +[[["East"], 44], [["East"], 29]] +[[["North", "South"], 83], [["North", "South"], 52]] +[[["East", "North"], 121], [["East", "North"], 74]] +[[["East", "North"], 97], [["East", "North"], 73]] +[[["North", "South"], 173], [["North", "South"], 130]] +[[["West", "East"], 90], [["West", "East"], 66]] +[[["West", "Stop", "East"], 161], [["West", "East"], 118]] +[[["Stop", "East", "South"], 58], [["East", "South"], 43]] +[[["Stop", "East"], 120], [["South"], 85]] +[[["East"], 78], [["East"], 45]] +[[["West"], 77], [["West"], 42]] +[[["South"], 83], [["South"], 48]] +[[["South"], 49], [["South"], 37]] +[[["South"], 185], [["South"], 104]] +[[["South"], 68], [["South"], 41]] +[[["West"], 30], [["West"], 18]] +[[["West"], 56], [["West"], 29]] +[[["West"], 14], [["West"], 10]] +[[["West"], 20], [["West"], 14]] +[[["West"], 13], [["West"], 9]] +[[["West"], 13], [["West"], 9]] +[[["West"], 16], [["West"], 12]] +[[["West", "North"], 30], [["West", "North"], 20]] +[[["West"], 38], [["West"], 23]] +[[["West", "Stop", "East", "North"], 70], [["West", "East", "North"], 46]] +[[["West", "Stop", "East"], 128], [["West", "East"], 89]] +[[["West", "Stop", "East"], 31], [["West", "East"], 20]] +[[["Stop", "East", "North"], 69], [["East", "North"], 45]] +[[["Stop", "North"], 58], [["North"], 31]] +[[["North"], 34], [["North"], 19]] +[[["North"], 30], [["North"], 17]] +[[["North"], 19], [["North"], 11]] +[[["North"], 34], [["North"], 19]] +[[["East"], 30], [["East"], 17]] +[[["East"], 19], [["East"], 11]] +[[["East"], 44], [["East"], 29]] +[[["East", "South"], 87], [["East", "South"], 60]] +[[["East", "South"], 108], [["East", "South"], 62]] +[[["South"], 120], [["South"], 61]] +[[["North", "South"], 209], [["North", "South"], 132]] +[[["West"], 108], [["West"], 60]] +[[["West", "Stop", "East", "South"], 83], [["West", "East", "South"], 61]] +[[["West", "Stop", "East", "South"], 90], [["West", "East", "South"], 66]] +[[["West", "Stop", "East"], 134], [["West", "East"], 95]] +[[["West", "Stop", "East"], 82], [["West", "East"], 55]] +[[["Stop", "East", "South"], 142], [["East", "South"], 95]] +[[["Stop", "East", "South"], 98], [["East", "South"], 65]] +[[["Stop", "East", "South"], 128], [["East", "South"], 86]] +[[["Stop", "East", "South"], 82], [["East", "South"], 55]] +[[["Stop", "East", "South"], 85], [["East", "South"], 57]] +[[["Stop", "East", "South"], 
190], [["East", "South"], 127]] +[[["Stop", "East", "South"], 158], [["East", "South"], 103]] +[[["Stop", "South"], 50], [["South"], 27]] +[[["South"], 30], [["South"], 17]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 18], [["East"], 12]] +[[["East", "North"], 29], [["East", "North"], 18]] +[[["East"], 37], [["East"], 22]] +[[["East", "North"], 41], [["East", "North"], 24]] +[[["East"], 59], [["East"], 29]] +[[["East"], 19], [["East"], 11]] +[[["East"], 26], [["East"], 15]] +[[["East"], 15], [["East"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 18], [["East"], 12]] +[[["East"], 29], [["East"], 18]] +[[["East"], 37], [["East"], 22]] +[[["East", "North"], 41], [["East", "North"], 24]] +[[["East"], 59], [["East"], 29]] +[[["East"], 19], [["East"], 11]] +[[["North"], 26], [["North"], 15]] +[[["North"], 19], [["North"], 11]] +[[["North"], 30], [["North"], 17]] +[[["North"], 34], [["North"], 19]] +[[["West"], 34], [["West"], 19]] +[[["West"], 25], [["West"], 13]] +[[["West", "Stop", "East"], 7], [["West", "East"], 3]] +""" +altDepthActions: """ +[["West", "East"], ["West", "East"], ["West", "East"], ["West", "East"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["Stop", "North"], ["North"]] +[["East"], ["East"], ["Stop", "East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["West"], ["West"], ["West"], ["West"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["West"], ["West"]] +[["East"], ["East"], ["East"], ["East"]] +[["North"], ["North"], ["North"], ["North"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["North", "South"], ["North", "South"], ["North"], ["North"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West", "Stop"], ["West"]] +[["West"], ["West"], ["West", "Stop", "East", "South"], ["West", "East", "South"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] 
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "South"], ["South"], ["Stop", "South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East", "North"], ["East", "North"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["North", "South"], ["North", "South"], ["South"], ["South"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["North", "South"], ["North", "South"], ["North"], ["North"]] +[["West", "East"], ["West", "East"], ["East"], ["East"]] +[["West"], ["West"], ["East"], ["East"]] +[["Stop", "East", "South"], ["East", "South"], ["East"], ["East"]] +[["Stop", "East"], ["East"], ["Stop", "East"], ["South"]] +[["East"], ["East"], ["East"], ["East"]] +[["West"], ["West"], ["West"], ["West"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["West", "East"], ["West", "East"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "Stop", "East", "North"], ["West", "East", "North"], ["West", "Stop", "East", "North"], ["West", "East", "North"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "North"], ["East", "North"], ["Stop", "East", "North"], ["East", "North"]] +[["Stop", "North"], ["North"], ["Stop", "North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East", "South"], ["East", "South"], ["East", 
"South"], ["East", "South"]] +[["East", "South"], ["East", "South"], ["East", "South"], ["East", "South"]] +[["South"], ["South"], ["South"], ["South"]] +[["North", "South"], ["North", "South"], ["North", "South"], ["North", "South"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "Stop", "East", "South"], ["West", "East", "South"], ["West", "Stop", "East", "South"], ["West", "East", "South"]] +[["West", "Stop", "East", "South"], ["West", "East", "South"], ["West", "Stop", "East", "South"], ["West", "East", "South"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "South"], ["South"], ["Stop", "South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["East"], ["East"], ["East"], ["East"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East", "North"], ["East", "North"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +""" +partialPlyBugActions: """ +[["West", "East"], ["West", "East"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West", "North"], ["West", "North"]] +[["West"], ["West"]] +[["West", "North"], ["West", "North"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["East"], ["East"]] +[["Stop", "East"], ["East"]] +[["West"], ["West"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["West"], ["West"]] +[["East"], ["East"]] +[["North"], ["North"]] +[["East", "North"], ["East", "North"]] +[["East", "North"], ["East", 
"North"]] +[["North", "South"], ["North", "South"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West", "Stop"], ["West"]] +[["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East", "North"], ["East", "North"]] +[["East"], ["East"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["North", "South"], ["North", "South"]] +[["East", "North"], ["East", "North"]] +[["East", "North"], ["East", "North"]] +[["North", "South"], ["North", "South"]] +[["West", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East"], ["East"]] +[["East"], ["East"]] +[["West"], ["West"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West", "North"], ["West", "North"]] +[["West"], ["West"]] +[["West", "Stop", "East", "North"], ["West", "East", "North"]] +[["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "North"], ["East", "North"]] +[["Stop", "North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East", "South"], ["East", "South"]] +[["East", "South"], ["East", "South"]] +[["South"], ["South"]] +[["North", "South"], ["North", "South"]] +[["West"], ["West"]] +[["West", "Stop", "East", "South"], ["West", "East", "South"]] +[["West", "Stop", "East", "South"], ["West", "East", "South"]] +[["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East", "North"], ["East", "North"]] +[["East"], ["East"]] +[["East", "North"], ["East", "North"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East", "North"], 
["East", "North"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West", "Stop", "East"], ["West", "East"]] +""" diff --git a/multiagent/test_cases/q2/8-pacman-game.test b/multiagent/test_cases/q2/8-pacman-game.test new file mode 100644 index 0000000..de8df6f --- /dev/null +++ b/multiagent/test_cases/q2/8-pacman-game.test @@ -0,0 +1,19 @@ +class: "PacmanGameTreeTest" +alg: "MinimaxAgent" +seed: "0" +depth: "2" +max_points: "4" + +# The following specifies the layout to be used +layoutName: "smallClassic" +layout: """ +%%%%%%%%%%%%%%%%%%%% +%......%G G%......% +%.%%...%% %%...%%.% +%.%o.%........%.o%.% +%.%%.%.%%%%%%.%.%%.% +%........P.........% +%%%%%%%%%%%%%%%%%%%% +""" + + diff --git a/multiagent/test_cases/q2/CONFIG b/multiagent/test_cases/q2/CONFIG new file mode 100644 index 0000000..2173a9f --- /dev/null +++ b/multiagent/test_cases/q2/CONFIG @@ -0,0 +1,2 @@ +max_points: "5" +class: "PassAllTestsQuestion" diff --git a/multiagent/test_cases/q3/0-eval-function-lose-states-1.solution b/multiagent/test_cases/q3/0-eval-function-lose-states-1.solution new file mode 100644 index 0000000..3a2bf0d --- /dev/null +++ b/multiagent/test_cases/q3/0-eval-function-lose-states-1.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/0-eval-function-lose-states-1.test. +action: "Left" +generated: "lose1 lose2 root" diff --git a/multiagent/test_cases/q3/0-eval-function-lose-states-1.test b/multiagent/test_cases/q3/0-eval-function-lose-states-1.test new file mode 100644 index 0000000..60d3424 --- /dev/null +++ b/multiagent/test_cases/q3/0-eval-function-lose-states-1.test @@ -0,0 +1,30 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "2" + +diagram: """ + root + / \ + lose1 lose2 + 1 0 + +If your algorithm is returning a different +action, make sure you are calling the +evaluation function on losing states. +""" +num_agents: "2" + +start_state: "root" +win_states: "" +lose_states: "lose1 lose2" + +successors: """ +root Left lose1 +root Right lose2 +""" + +evaluation: """ +lose1 1.0 +lose2 0.0 +""" + diff --git a/multiagent/test_cases/q3/0-eval-function-lose-states-2.solution b/multiagent/test_cases/q3/0-eval-function-lose-states-2.solution new file mode 100644 index 0000000..06e4e38 --- /dev/null +++ b/multiagent/test_cases/q3/0-eval-function-lose-states-2.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/0-eval-function-lose-states-2.test. +action: "Right" +generated: "lose1 lose2 root" diff --git a/multiagent/test_cases/q3/0-eval-function-lose-states-2.test b/multiagent/test_cases/q3/0-eval-function-lose-states-2.test new file mode 100644 index 0000000..b6542df --- /dev/null +++ b/multiagent/test_cases/q3/0-eval-function-lose-states-2.test @@ -0,0 +1,30 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "2" + +diagram: """ + root + / \ + lose1 lose2 + 0 1 + +If your algorithm is returning a different +action, make sure you are calling the +evaluation function on losing states. 
+""" +num_agents: "2" + +start_state: "root" +win_states: "" +lose_states: "lose1 lose2" + +successors: """ +root Left lose1 +root Right lose2 +""" + +evaluation: """ +lose1 0.0 +lose2 1.0 +""" + diff --git a/multiagent/test_cases/q3/0-eval-function-win-states-1.solution b/multiagent/test_cases/q3/0-eval-function-win-states-1.solution new file mode 100644 index 0000000..422d322 --- /dev/null +++ b/multiagent/test_cases/q3/0-eval-function-win-states-1.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/0-eval-function-win-states-1.test. +action: "Left" +generated: "root win1 win2" diff --git a/multiagent/test_cases/q3/0-eval-function-win-states-1.test b/multiagent/test_cases/q3/0-eval-function-win-states-1.test new file mode 100644 index 0000000..e23e977 --- /dev/null +++ b/multiagent/test_cases/q3/0-eval-function-win-states-1.test @@ -0,0 +1,30 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "2" + +diagram: """ + root + / \ + win1 win2 + 1 0 + +If your algorithm is returning a different +action, make sure you are calling the +evaluation function on winning states. +""" +num_agents: "2" + +start_state: "root" +win_states: "win1 win2" +lose_states: "" + +successors: """ +root Left win1 +root Right win2 +""" + +evaluation: """ +win1 1.0 +win2 0.0 +""" + diff --git a/multiagent/test_cases/q3/0-eval-function-win-states-2.solution b/multiagent/test_cases/q3/0-eval-function-win-states-2.solution new file mode 100644 index 0000000..7c56b64 --- /dev/null +++ b/multiagent/test_cases/q3/0-eval-function-win-states-2.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/0-eval-function-win-states-2.test. +action: "Right" +generated: "root win1 win2" diff --git a/multiagent/test_cases/q3/0-eval-function-win-states-2.test b/multiagent/test_cases/q3/0-eval-function-win-states-2.test new file mode 100644 index 0000000..7d48572 --- /dev/null +++ b/multiagent/test_cases/q3/0-eval-function-win-states-2.test @@ -0,0 +1,30 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "2" + +diagram: """ + root + / \ + win1 win2 + 0 1 + +If your algorithm is returning a different +action, make sure you are calling the +evaluation function on winning states. +""" +num_agents: "2" + +start_state: "root" +win_states: "win1 win2" +lose_states: "" + +successors: """ +root Left win1 +root Right win2 +""" + +evaluation: """ +win1 0.0 +win2 1.0 +""" + diff --git a/multiagent/test_cases/q3/0-lecture-6-tree.solution b/multiagent/test_cases/q3/0-lecture-6-tree.solution new file mode 100644 index 0000000..c5778a9 --- /dev/null +++ b/multiagent/test_cases/q3/0-lecture-6-tree.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/0-lecture-6-tree.test. 
+action: "Center" +generated: "A B C D E F G H max min1 min2 min3" diff --git a/multiagent/test_cases/q3/0-lecture-6-tree.test b/multiagent/test_cases/q3/0-lecture-6-tree.test new file mode 100644 index 0000000..73b12d3 --- /dev/null +++ b/multiagent/test_cases/q3/0-lecture-6-tree.test @@ -0,0 +1,50 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "2" + +# Tree from lecture 6 slides +diagram: """ + max + /-/ | \--\ + / | \ + / | \ + min1 min2 min3 + /|\ /|\ /|\ + / | \ / | \ / | \ +A B C D E F G H I +3 12 8 5 4 6 14 1 11 +""" + +num_agents: "2" + +start_state: "max" +win_states: "A B C D E F G H I" +lose_states: "" + +successors: """ +max Left min1 +max Center min2 +max Right min3 +min1 Left A +min1 Center B +min1 Right C +min2 Left D +min2 Center E +min2 Right F +min3 Left G +min3 Center H +min3 Right I +""" + + +evaluation: """ +A 3.0 +B 12.0 +C 8.0 +D 5.0 +E 4.0 +F 6.0 +G 14.0 +H 1.0 +I 11.0 +""" diff --git a/multiagent/test_cases/q3/0-small-tree.solution b/multiagent/test_cases/q3/0-small-tree.solution new file mode 100644 index 0000000..af8745b --- /dev/null +++ b/multiagent/test_cases/q3/0-small-tree.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/0-small-tree.test. +action: "pacLeft" +generated: "A B C minLeft minRight root" diff --git a/multiagent/test_cases/q3/0-small-tree.test b/multiagent/test_cases/q3/0-small-tree.test new file mode 100644 index 0000000..6e2accf --- /dev/null +++ b/multiagent/test_cases/q3/0-small-tree.test @@ -0,0 +1,36 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "3" + +diagram: """ + root + / \ + minLeft minRight + / \ / \ + A B C deeper + 4 3 2 | + D + 1000 +""" +num_agents: "2" + +start_state: "root" +win_states: "A C" +lose_states: "B D" + +successors: """ +root pacLeft minLeft +root pacRight minRight +minLeft gLeft A +minLeft gRight B +minRight gLeft C +minRight gRight deeper +deeper pacLeft D +""" + +evaluation: """ +A 4.0 +B 3.0 +C 2.0 +D 1000.0 +""" diff --git a/multiagent/test_cases/q3/1-1-minmax.solution b/multiagent/test_cases/q3/1-1-minmax.solution new file mode 100644 index 0000000..dd9a115 --- /dev/null +++ b/multiagent/test_cases/q3/1-1-minmax.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/1-1-minmax.test. +action: "Left" +generated: "a b1 b2 c1 c2 cx d1 d2 d3 dx" diff --git a/multiagent/test_cases/q3/1-1-minmax.test b/multiagent/test_cases/q3/1-1-minmax.test new file mode 100644 index 0000000..dca9cc3 --- /dev/null +++ b/multiagent/test_cases/q3/1-1-minmax.test @@ -0,0 +1,47 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "3" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ | + c1 c2 cx + / \ / \ | + d1 d2 d3 d4 dx +-3 -9 10 6 -3.01 + +a - max +b - min +c - max + +Note that the minimax value of b1 is -3. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Down cx +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +cx Down dx +""" + +evaluation: """ +d1 -3.0 +d2 -9.0 +d3 10.0 +d4 6.0 +dx -3.01 +""" diff --git a/multiagent/test_cases/q3/1-2-minmax.solution b/multiagent/test_cases/q3/1-2-minmax.solution new file mode 100644 index 0000000..92f7548 --- /dev/null +++ b/multiagent/test_cases/q3/1-2-minmax.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/1-2-minmax.test. 
+action: "Right" +generated: "a b1 b2 c1 c2 cx d1 d2 d3 dx" diff --git a/multiagent/test_cases/q3/1-2-minmax.test b/multiagent/test_cases/q3/1-2-minmax.test new file mode 100644 index 0000000..fba18ca --- /dev/null +++ b/multiagent/test_cases/q3/1-2-minmax.test @@ -0,0 +1,47 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "3" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ | + c1 c2 cx + / \ / \ | + d1 d2 d3 d4 dx +-3 -9 10 6 -2.99 + +a - max +b - min +c - max + +Note that the minimax value of b1 is -3. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Down cx +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +cx Down dx +""" + +evaluation: """ +d1 -3.0 +d2 -9.0 +d3 10.0 +d4 6.0 +dx -2.99 +""" diff --git a/multiagent/test_cases/q3/1-3-minmax.solution b/multiagent/test_cases/q3/1-3-minmax.solution new file mode 100644 index 0000000..2ddbcbd --- /dev/null +++ b/multiagent/test_cases/q3/1-3-minmax.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/1-3-minmax.test. +action: "Left" +generated: "a b1 b2 c3 cx d5 d6 dx" diff --git a/multiagent/test_cases/q3/1-3-minmax.test b/multiagent/test_cases/q3/1-3-minmax.test new file mode 100644 index 0000000..90f6f74 --- /dev/null +++ b/multiagent/test_cases/q3/1-3-minmax.test @@ -0,0 +1,47 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "3" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + | / \ + cx c3 c4 + | / \ / \ + dx d5 d6 d7 d8 + 4.01 4 -7 0 5 + +a - max +b - min +c - max + +Note that the minimax value of b2 is 4. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Down cx +b2 Left c3 +b2 Right c4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +""" + +evaluation: """ +d5 4.0 +d6 -7.0 +d7 0.0 +d8 5.0 +dx 4.01 +""" diff --git a/multiagent/test_cases/q3/1-4-minmax.solution b/multiagent/test_cases/q3/1-4-minmax.solution new file mode 100644 index 0000000..bdeab5d --- /dev/null +++ b/multiagent/test_cases/q3/1-4-minmax.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/1-4-minmax.test. +action: "Right" +generated: "a b1 b2 c3 c4 cx d5 d6 d7 d8 dx" diff --git a/multiagent/test_cases/q3/1-4-minmax.test b/multiagent/test_cases/q3/1-4-minmax.test new file mode 100644 index 0000000..3d9434d --- /dev/null +++ b/multiagent/test_cases/q3/1-4-minmax.test @@ -0,0 +1,47 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "3" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + | / \ + cx c3 c4 + | / \ / \ + dx d5 d6 d7 d8 + 3.99 4 -7 0 5 + +a - max +b - min +c - max + +Note that the minimax value of b2 is 4. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Down cx +b2 Left c3 +b2 Right c4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +""" + +evaluation: """ +d5 4.0 +d6 -7.0 +d7 0.0 +d8 5.0 +dx 3.99 +""" diff --git a/multiagent/test_cases/q3/1-5-minmax.solution b/multiagent/test_cases/q3/1-5-minmax.solution new file mode 100644 index 0000000..3fa97d4 --- /dev/null +++ b/multiagent/test_cases/q3/1-5-minmax.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/1-5-minmax.test. 
+action: "Right" +generated: "A B C D E F G Z a b1 b2 c1 c2 cx d1 d2 d3 d4 dx" diff --git a/multiagent/test_cases/q3/1-5-minmax.test b/multiagent/test_cases/q3/1-5-minmax.test new file mode 100644 index 0000000..c850e0f --- /dev/null +++ b/multiagent/test_cases/q3/1-5-minmax.test @@ -0,0 +1,75 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "4" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ | + c1 c2 cx + / \ / \ | + d1 d2 d3 d4 dx + / \ / \ / \ / \ | + A B C D E F G H Z +-3 13 5 9 10 3 -6 8 3.01 + +a - max +b - min +c - max +d - min + +Note the minimax value of b1 is 3. +""" +num_agents: "2" + +start_state: "a" +win_states: "A B C D E F G H I J K L M N O P Z" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Down cx +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +d1 Left A +d1 Right B +d2 Left C +d2 Right D +d3 Left E +d3 Right F +d4 Left G +d4 Right H +d5 Left I +d5 Right J +d6 Left K +d6 Right L +d7 Left M +d7 Right N +d8 Left O +d8 Right P +dx Down Z +""" + +evaluation: """ +A -3.0 +B 13.0 +C 5.0 +D 9.0 +E 10.0 +F 3.0 +G -6.0 +H 8.0 +Z 3.01 +""" diff --git a/multiagent/test_cases/q3/1-6-minmax.solution b/multiagent/test_cases/q3/1-6-minmax.solution new file mode 100644 index 0000000..9608b22 --- /dev/null +++ b/multiagent/test_cases/q3/1-6-minmax.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/1-6-minmax.test. +action: "Left" +generated: "A B C D E F G Z a b1 b2 c1 c2 cx d1 d2 d3 d4 dx" diff --git a/multiagent/test_cases/q3/1-6-minmax.test b/multiagent/test_cases/q3/1-6-minmax.test new file mode 100644 index 0000000..361fdaf --- /dev/null +++ b/multiagent/test_cases/q3/1-6-minmax.test @@ -0,0 +1,75 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "4" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ | + c1 c2 cx + / \ / \ | + d1 d2 d3 d4 dx + / \ / \ / \ / \ | + A B C D E F G H Z +-3 13 5 9 10 3 -6 8 2.99 + +a - max +b - min +c - max +d - min + +Note the minimax value of b1 is 3. +""" +num_agents: "2" + +start_state: "a" +win_states: "A B C D E F G H I J K L M N O P Z" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Down cx +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +d1 Left A +d1 Right B +d2 Left C +d2 Right D +d3 Left E +d3 Right F +d4 Left G +d4 Right H +d5 Left I +d5 Right J +d6 Left K +d6 Right L +d7 Left M +d7 Right N +d8 Left O +d8 Right P +dx Down Z +""" + +evaluation: """ +A -3.0 +B 13.0 +C 5.0 +D 9.0 +E 10.0 +F 3.0 +G -6.0 +H 8.0 +Z 2.99 +""" diff --git a/multiagent/test_cases/q3/1-7-minmax.solution b/multiagent/test_cases/q3/1-7-minmax.solution new file mode 100644 index 0000000..fb8c1ca --- /dev/null +++ b/multiagent/test_cases/q3/1-7-minmax.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/1-7-minmax.test. 
+action: "Left" +generated: "I J K M O P Z a b1 b2 c3 c4 cx d5 d6 d7 d8 dx" diff --git a/multiagent/test_cases/q3/1-7-minmax.test b/multiagent/test_cases/q3/1-7-minmax.test new file mode 100644 index 0000000..ed9970f --- /dev/null +++ b/multiagent/test_cases/q3/1-7-minmax.test @@ -0,0 +1,75 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "4" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + | / \ + cx c3 c4 + | / \ / \ + dx d5 d6 d7 d8 + | / \ / \ / \ / \ + Z I J K L M N O P + -1.99 -1 -9 4 7 2 5 -3 -2 + +a - max +b - min +c - min +d - max + +Note that the minimax value of b2 is -2 +""" +num_agents: "3" + +start_state: "a" +win_states: "A B C D E F G H I J K L M N O P Z" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Down cx +b2 Left c3 +b2 Right c4 +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +d1 Left A +d1 Right B +d2 Left C +d2 Right D +d3 Left E +d3 Right F +d4 Left G +d4 Right H +d5 Left I +d5 Right J +d6 Left K +d6 Right L +d7 Left M +d7 Right N +d8 Left O +d8 Right P +dx Down Z +""" + +evaluation: """ +I -1.0 +J -9.0 +K 4.0 +L 7.0 +M 2.0 +N 5.0 +O -3.0 +P -2.0 +Z -1.99 +""" diff --git a/multiagent/test_cases/q3/1-8-minmax.solution b/multiagent/test_cases/q3/1-8-minmax.solution new file mode 100644 index 0000000..f6f86ba --- /dev/null +++ b/multiagent/test_cases/q3/1-8-minmax.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/1-8-minmax.test. +action: "Right" +generated: "I J K M O P Z a b1 b2 c3 c4 cx d5 d6 d7 d8 dx" diff --git a/multiagent/test_cases/q3/1-8-minmax.test b/multiagent/test_cases/q3/1-8-minmax.test new file mode 100644 index 0000000..32c15da --- /dev/null +++ b/multiagent/test_cases/q3/1-8-minmax.test @@ -0,0 +1,75 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "4" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + | / \ + cx c3 c4 + | / \ / \ + dx d5 d6 d7 d8 + | / \ / \ / \ / \ + Z I J K L M N O P + -2.01 -1 -9 4 7 2 5 -3 -2 + +a - max +b - min +c - min +d - max + +Note that the minimax value of b2 is -2.01 +""" +num_agents: "3" + +start_state: "a" +win_states: "A B C D E F G H I J K L M N O P Z" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Down cx +b2 Left c3 +b2 Right c4 +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +d1 Left A +d1 Right B +d2 Left C +d2 Right D +d3 Left E +d3 Right F +d4 Left G +d4 Right H +d5 Left I +d5 Right J +d6 Left K +d6 Right L +d7 Left M +d7 Right N +d8 Left O +d8 Right P +dx Down Z +""" + +evaluation: """ +I -1.0 +J -9.0 +K 4.0 +L 7.0 +M 2.0 +N 5.0 +O -3.0 +P -2.0 +Z -2.01 +""" diff --git a/multiagent/test_cases/q3/2-1a-vary-depth.solution b/multiagent/test_cases/q3/2-1a-vary-depth.solution new file mode 100644 index 0000000..892546f --- /dev/null +++ b/multiagent/test_cases/q3/2-1a-vary-depth.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/2-1a-vary-depth.test. 
+action: "Left" +generated: "a b1 b2 c1 c2 cx" diff --git a/multiagent/test_cases/q3/2-1a-vary-depth.test b/multiagent/test_cases/q3/2-1a-vary-depth.test new file mode 100644 index 0000000..392ae13 --- /dev/null +++ b/multiagent/test_cases/q3/2-1a-vary-depth.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "1" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ | +-4 c1 c2 9 cx -4.01 + / \ / \ | + d1 d2 d3 d4 dx +-3 -9 10 6 -4.01 + +a - max +b - min +c - max + +Note that the minimax value of b1 is -3, but the depth=1 limited value is -4. +The values next to c1, c2, and cx are the values of the evaluation function, not +necessarily the correct minimax backup. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Down cx +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +cx Down dx +""" + +evaluation: """ +c1 -4.0 +c2 9.0 +cx -4.01 +d1 -3.0 +d2 -9.0 +d3 10.0 +d4 6.0 +dx -4.01 +""" diff --git a/multiagent/test_cases/q3/2-1b-vary-depth.solution b/multiagent/test_cases/q3/2-1b-vary-depth.solution new file mode 100644 index 0000000..66f3a6d --- /dev/null +++ b/multiagent/test_cases/q3/2-1b-vary-depth.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/2-1b-vary-depth.test. +action: "Left" +generated: "a b1 b2 c1 c2 cx d1 d2 d3 dx" diff --git a/multiagent/test_cases/q3/2-1b-vary-depth.test b/multiagent/test_cases/q3/2-1b-vary-depth.test new file mode 100644 index 0000000..fd4e46c --- /dev/null +++ b/multiagent/test_cases/q3/2-1b-vary-depth.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "2" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ | +-4 c1 c2 9 cx -4.01 + / \ / \ | + d1 d2 d3 d4 dx +-3 -9 10 6 -4.01 + +a - max +b - min +c - max + +Note that the minimax value of b1 is -3, but the depth=1 limited value is -4. +The values next to c1, c2, and cx are the values of the evaluation function, not +necessarily the correct minimax backup. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Down cx +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +cx Down dx +""" + +evaluation: """ +c1 -4.0 +c2 9.0 +cx -4.01 +d1 -3.0 +d2 -9.0 +d3 10.0 +d4 6.0 +dx -4.01 +""" diff --git a/multiagent/test_cases/q3/2-2a-vary-depth.solution b/multiagent/test_cases/q3/2-2a-vary-depth.solution new file mode 100644 index 0000000..71213b5 --- /dev/null +++ b/multiagent/test_cases/q3/2-2a-vary-depth.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/2-2a-vary-depth.test. +action: "Right" +generated: "a b1 b2 c1 c2 cx" diff --git a/multiagent/test_cases/q3/2-2a-vary-depth.test b/multiagent/test_cases/q3/2-2a-vary-depth.test new file mode 100644 index 0000000..a51f580 --- /dev/null +++ b/multiagent/test_cases/q3/2-2a-vary-depth.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "1" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ | +-4 c1 c2 9 cx -3.99 + / \ / \ | + d1 d2 d3 d4 dx +-3 -9 10 6 -3.99 + +a - max +b - min +c - max + +Note that the minimax value of b1 is -3, but the depth=1 limited value is -4. +The values next to c1, c2, and cx are the values of the evaluation function, not +necessarily the correct minimax backup. 
+""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Down cx +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +cx Down dx +""" + +evaluation: """ +c1 -4.0 +c2 9.0 +cx -3.99 +d1 -3.0 +d2 -9.0 +d3 10.0 +d4 6.0 +dx -3.99 +""" diff --git a/multiagent/test_cases/q3/2-2b-vary-depth.solution b/multiagent/test_cases/q3/2-2b-vary-depth.solution new file mode 100644 index 0000000..073eab4 --- /dev/null +++ b/multiagent/test_cases/q3/2-2b-vary-depth.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/2-2b-vary-depth.test. +action: "Left" +generated: "a b1 b2 c1 c2 cx d1 d2 d3 dx" diff --git a/multiagent/test_cases/q3/2-2b-vary-depth.test b/multiagent/test_cases/q3/2-2b-vary-depth.test new file mode 100644 index 0000000..44dfb63 --- /dev/null +++ b/multiagent/test_cases/q3/2-2b-vary-depth.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "2" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ | +-4 c1 c2 9 cx -3.99 + / \ / \ | + d1 d2 d3 d4 dx +-3 -9 10 6 -3.99 + +a - max +b - min +c - max + +Note that the minimax value of b1 is -3, but the depth=1 limited value is -4. +The values next to c1, c2, and cx are the values of the evaluation function, not +necessarily the correct minimax backup. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Down cx +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +cx Down dx +""" + +evaluation: """ +c1 -4.0 +c2 9.0 +cx -3.99 +d1 -3.0 +d2 -9.0 +d3 10.0 +d4 6.0 +dx -3.99 +""" diff --git a/multiagent/test_cases/q3/2-3a-vary-depth.solution b/multiagent/test_cases/q3/2-3a-vary-depth.solution new file mode 100644 index 0000000..6cb00a6 --- /dev/null +++ b/multiagent/test_cases/q3/2-3a-vary-depth.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/2-3a-vary-depth.test. +action: "Left" +generated: "a b1 b2 c3 c4 cx" diff --git a/multiagent/test_cases/q3/2-3a-vary-depth.test b/multiagent/test_cases/q3/2-3a-vary-depth.test new file mode 100644 index 0000000..61a4ce5 --- /dev/null +++ b/multiagent/test_cases/q3/2-3a-vary-depth.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "1" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + | / \ + 5.01 cx 8 c3 c4 5 + | / \ / \ + dx d5 d6 d7 d8 + 5.01 4 -7 0 5 + +a - max +b - min +c - max + +Note that the minimax value of b1 is 4, but the depth=1 limited value is 5. +The values next to c3, c4, and cx are the values of the evaluation function, not +necessarily the correct minimax backup. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Down cx +b2 Left c3 +b2 Right c4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +""" + +evaluation: """ +c3 8.0 +c4 5.0 +cx 5.01 +d5 4.0 +d6 -7.0 +d7 0.0 +d8 5.0 +dx 5.01 +""" diff --git a/multiagent/test_cases/q3/2-3b-vary-depth.solution b/multiagent/test_cases/q3/2-3b-vary-depth.solution new file mode 100644 index 0000000..4ee5c53 --- /dev/null +++ b/multiagent/test_cases/q3/2-3b-vary-depth.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/2-3b-vary-depth.test. 
+action: "Left" +generated: "a b1 b2 c3 cx d5 d6 dx" diff --git a/multiagent/test_cases/q3/2-3b-vary-depth.test b/multiagent/test_cases/q3/2-3b-vary-depth.test new file mode 100644 index 0000000..c5dd181 --- /dev/null +++ b/multiagent/test_cases/q3/2-3b-vary-depth.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "2" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + | / \ + 5.01 cx 8 c3 c4 5 + | / \ / \ + dx d5 d6 d7 d8 + 5.01 4 -7 0 5 + +a - max +b - min +c - max + +Note that the minimax value of b1 is 4, but the depth=1 limited value is 5. +The values next to c3, c4, and cx are the values of the evaluation function, not +necessarily the correct minimax backup. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Down cx +b2 Left c3 +b2 Right c4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +""" + +evaluation: """ +c3 8.0 +c4 5.0 +cx 5.01 +d5 4.0 +d6 -7.0 +d7 0.0 +d8 5.0 +dx 5.01 +""" diff --git a/multiagent/test_cases/q3/2-4a-vary-depth.solution b/multiagent/test_cases/q3/2-4a-vary-depth.solution new file mode 100644 index 0000000..fa44304 --- /dev/null +++ b/multiagent/test_cases/q3/2-4a-vary-depth.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/2-4a-vary-depth.test. +action: "Right" +generated: "a b1 b2 c3 c4 cx" diff --git a/multiagent/test_cases/q3/2-4a-vary-depth.test b/multiagent/test_cases/q3/2-4a-vary-depth.test new file mode 100644 index 0000000..8f81995 --- /dev/null +++ b/multiagent/test_cases/q3/2-4a-vary-depth.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "1" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + | / \ + 4.99 cx 8 c3 c4 5 + | / \ / \ + dx d5 d6 d7 d8 + 4.99 4 -7 0 5 + +a - max +b - min +c - max + +Note that the minimax value of b1 is 4, but the depth=1 limited value is 5. +The values next to c3, c4, and cx are the values of the evaluation function, not +necessarily the correct minimax backup. +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Down cx +b2 Left c3 +b2 Right c4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +""" + +evaluation: """ +c3 8.0 +c4 5.0 +cx 4.99 +d5 4.0 +d6 -7.0 +d7 0.0 +d8 5.0 +dx 4.99 +""" diff --git a/multiagent/test_cases/q3/2-4b-vary-depth.solution b/multiagent/test_cases/q3/2-4b-vary-depth.solution new file mode 100644 index 0000000..284c12d --- /dev/null +++ b/multiagent/test_cases/q3/2-4b-vary-depth.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/2-4b-vary-depth.test. +action: "Left" +generated: "a b1 b2 c3 cx d5 d6 dx" diff --git a/multiagent/test_cases/q3/2-4b-vary-depth.test b/multiagent/test_cases/q3/2-4b-vary-depth.test new file mode 100644 index 0000000..819ed51 --- /dev/null +++ b/multiagent/test_cases/q3/2-4b-vary-depth.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "2" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + | / \ + 4.99 cx 8 c3 c4 5 + | / \ / \ + dx d5 d6 d7 d8 + 4.99 4 -7 0 5 + +a - max +b - min +c - max + +Note that the minimax value of b1 is 4, but the depth=1 limited value is 5. +The values next to c3, c4, and cx are the values of the evaluation function, not +necessarily the correct minimax backup. 
+""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Down cx +b2 Left c3 +b2 Right c4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +cx Down dx +""" + +evaluation: """ +c3 8.0 +c4 5.0 +cx 4.99 +d5 4.0 +d6 -7.0 +d7 0.0 +d8 5.0 +dx 4.99 +""" diff --git a/multiagent/test_cases/q3/2-one-ghost-3level.solution b/multiagent/test_cases/q3/2-one-ghost-3level.solution new file mode 100644 index 0000000..77e669a --- /dev/null +++ b/multiagent/test_cases/q3/2-one-ghost-3level.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/2-one-ghost-3level.test. +action: "Left" +generated: "a b1 b2 c1 c2 c3 d1 d2 d3 d5 d6" diff --git a/multiagent/test_cases/q3/2-one-ghost-3level.test b/multiagent/test_cases/q3/2-one-ghost-3level.test new file mode 100644 index 0000000..99aa2fb --- /dev/null +++ b/multiagent/test_cases/q3/2-one-ghost-3level.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "3" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ / \ + c1 c2 c3 c4 + / \ / \ / \ / \ + d1 d2 d3 d4 d5 d6 d7 d8 + 3 9 10 6 4 7 0 5 + +a - max +b - min +c - max +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Left c3 +b2 Right c4 +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +""" + +evaluation: """ +d1 3.0 +d2 9.0 +d3 10.0 +d4 6.0 +d5 4.0 +d6 7.0 +d7 0.0 +d8 5.0 +""" diff --git a/multiagent/test_cases/q3/3-one-ghost-4level.solution b/multiagent/test_cases/q3/3-one-ghost-4level.solution new file mode 100644 index 0000000..34dd2ce --- /dev/null +++ b/multiagent/test_cases/q3/3-one-ghost-4level.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/3-one-ghost-4level.test. +action: "Left" +generated: "A B C D E F I K a b1 b2 c1 c2 c3 d1 d2 d3 d5 d6" diff --git a/multiagent/test_cases/q3/3-one-ghost-4level.test b/multiagent/test_cases/q3/3-one-ghost-4level.test new file mode 100644 index 0000000..7035a84 --- /dev/null +++ b/multiagent/test_cases/q3/3-one-ghost-4level.test @@ -0,0 +1,79 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "4" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ / \ + c1 c2 c3 c4 + / \ / \ / \ / \ + d1 d2 d3 d4 d5 d6 d7 d8 +/ \ / \ / \ / \ / \ / \ / \ / \ +A B C D E F G H I J K L M N O P +3 13 5 9 10 11 6 8 1 0 4 7 12 15 2 14 + +a - max +b - min +c - max +d - min +""" +num_agents: "2" + +start_state: "a" +win_states: "A B C D E F G H I J K L M N O P" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Left c3 +b2 Right c4 +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +d1 Left A +d1 Right B +d2 Left C +d2 Right D +d3 Left E +d3 Right F +d4 Left G +d4 Right H +d5 Left I +d5 Right J +d6 Left K +d6 Right L +d7 Left M +d7 Right N +d8 Left O +d8 Right P +""" + +evaluation: """ +A 3.0 +B 13.0 +C 5.0 +D 9.0 +E 10.0 +F 11.0 +G 6.0 +H 8.0 +I 1.0 +J 0.0 +K 4.0 +L 7.0 +M 12.0 +N 15.0 +O 2.0 +P 14.0 +""" diff --git a/multiagent/test_cases/q3/4-two-ghosts-3level.solution b/multiagent/test_cases/q3/4-two-ghosts-3level.solution new file mode 100644 index 0000000..102f443 --- /dev/null +++ b/multiagent/test_cases/q3/4-two-ghosts-3level.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/4-two-ghosts-3level.test. 
+action: "Left" +generated: "a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7" diff --git a/multiagent/test_cases/q3/4-two-ghosts-3level.test b/multiagent/test_cases/q3/4-two-ghosts-3level.test new file mode 100644 index 0000000..d20ba31 --- /dev/null +++ b/multiagent/test_cases/q3/4-two-ghosts-3level.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "3" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ / \ + c1 c2 c3 c4 + / \ / \ / \ / \ + d1 d2 d3 d4 d5 d6 d7 d8 + 3 9 10 6 4 7 0 5 + +a - max +b - min +c - min +""" +num_agents: "3" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Left c3 +b2 Right c4 +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +""" + +evaluation: """ +d1 3.0 +d2 9.0 +d3 10.0 +d4 6.0 +d5 4.0 +d6 7.0 +d7 0.0 +d8 5.0 +""" diff --git a/multiagent/test_cases/q3/5-two-ghosts-4level.solution b/multiagent/test_cases/q3/5-two-ghosts-4level.solution new file mode 100644 index 0000000..614a8b4 --- /dev/null +++ b/multiagent/test_cases/q3/5-two-ghosts-4level.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/5-two-ghosts-4level.test. +action: "Left" +generated: "A B C D E G H I J a b1 b2 c1 c2 c3 d1 d2 d3 d4 d5" diff --git a/multiagent/test_cases/q3/5-two-ghosts-4level.test b/multiagent/test_cases/q3/5-two-ghosts-4level.test new file mode 100644 index 0000000..343665b --- /dev/null +++ b/multiagent/test_cases/q3/5-two-ghosts-4level.test @@ -0,0 +1,79 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "4" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ / \ + c1 c2 c3 c4 + / \ / \ / \ / \ + d1 d2 d3 d4 d5 d6 d7 d8 +/ \ / \ / \ / \ / \ / \ / \ / \ +A B C D E F G H I J K L M N O P +3 13 5 9 10 11 6 8 1 0 4 7 12 15 2 14 + +a - max +b - min +c - min +d - max +""" +num_agents: "3" + +start_state: "a" +win_states: "A B C D E F G H I J K L M N O P" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Left c3 +b2 Right c4 +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +d1 Left A +d1 Right B +d2 Left C +d2 Right D +d3 Left E +d3 Right F +d4 Left G +d4 Right H +d5 Left I +d5 Right J +d6 Left K +d6 Right L +d7 Left M +d7 Right N +d8 Left O +d8 Right P +""" + +evaluation: """ +A 3.0 +B 13.0 +C 5.0 +D 9.0 +E 10.0 +F 11.0 +G 6.0 +H 8.0 +I 1.0 +J 0.0 +K 4.0 +L 7.0 +M 12.0 +N 15.0 +O 2.0 +P 14.0 +""" diff --git a/multiagent/test_cases/q3/6-tied-root.solution b/multiagent/test_cases/q3/6-tied-root.solution new file mode 100644 index 0000000..36f3c1d --- /dev/null +++ b/multiagent/test_cases/q3/6-tied-root.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/6-tied-root.test. 
+action: "Left" +generated: "A B C max min1 min2" diff --git a/multiagent/test_cases/q3/6-tied-root.test b/multiagent/test_cases/q3/6-tied-root.test new file mode 100644 index 0000000..b0ba81c --- /dev/null +++ b/multiagent/test_cases/q3/6-tied-root.test @@ -0,0 +1,31 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "3" + +diagram: """ + max + / \ +min1 min2 + | / \ + A B C +10 10 0 +""" +num_agents: "2" + +start_state: "max" +win_states: "A B" +lose_states: "C" + +successors: """ +max Left min1 +max Right min2 +min1 Down A +min2 Left B +min2 Right C +""" + +evaluation: """ +A 10.0 +B 10.0 +C 0.0 +""" diff --git a/multiagent/test_cases/q3/7-1a-check-depth-one-ghost.solution b/multiagent/test_cases/q3/7-1a-check-depth-one-ghost.solution new file mode 100644 index 0000000..d03bfc1 --- /dev/null +++ b/multiagent/test_cases/q3/7-1a-check-depth-one-ghost.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/7-1a-check-depth-one-ghost.test. +action: "Left" +generated: "a b1 b2 b3 c1 c2 c3" diff --git a/multiagent/test_cases/q3/7-1a-check-depth-one-ghost.test b/multiagent/test_cases/q3/7-1a-check-depth-one-ghost.test new file mode 100644 index 0000000..da5b428 --- /dev/null +++ b/multiagent/test_cases/q3/7-1a-check-depth-one-ghost.test @@ -0,0 +1,83 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "1" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 10 c1 0 c2 c3 8 + | | | + 0 d1 0 d2 d3 8 + | | | + 0 e1 10 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + g1 g2 g3 + 0 0 8 + +a - max +b - min +c - max +d - min +e - max +f - min + +At depth 1, the evaluation function is called at level c, +so Left should be returned. If your algorithm is returning a +different action, check how you implemented your depth. +""" + +num_agents: "2" + +start_state: "a" +win_states: "g1 g2 g3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 10.0 +c2 0.0 +c3 8.0 +d1 0.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 10.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 0.0 +g3 8.0 +""" + + diff --git a/multiagent/test_cases/q3/7-1b-check-depth-one-ghost.solution b/multiagent/test_cases/q3/7-1b-check-depth-one-ghost.solution new file mode 100644 index 0000000..f68218d --- /dev/null +++ b/multiagent/test_cases/q3/7-1b-check-depth-one-ghost.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/7-1b-check-depth-one-ghost.test. +action: "Center" +generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3" diff --git a/multiagent/test_cases/q3/7-1b-check-depth-one-ghost.test b/multiagent/test_cases/q3/7-1b-check-depth-one-ghost.test new file mode 100644 index 0000000..ee3e750 --- /dev/null +++ b/multiagent/test_cases/q3/7-1b-check-depth-one-ghost.test @@ -0,0 +1,83 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "2" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 10 c1 0 c2 c3 8 + | | | + 0 d1 0 d2 d3 8 + | | | + 0 e1 10 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + g1 g2 g3 + 0 0 8 + +a - max +b - min +c - max +d - min +e - max +f - min + +At depth 2, the evaluation function is called at level e, +so Center should be returned. If your algorithm is returning a +different action, check how you implemented your depth. 
+""" + +num_agents: "2" + +start_state: "a" +win_states: "g1 g2 g3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 10.0 +c2 0.0 +c3 8.0 +d1 0.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 10.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 0.0 +g3 8.0 +""" + + diff --git a/multiagent/test_cases/q3/7-1c-check-depth-one-ghost.solution b/multiagent/test_cases/q3/7-1c-check-depth-one-ghost.solution new file mode 100644 index 0000000..3d5c062 --- /dev/null +++ b/multiagent/test_cases/q3/7-1c-check-depth-one-ghost.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/7-1c-check-depth-one-ghost.test. +action: "Right" +generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3" diff --git a/multiagent/test_cases/q3/7-1c-check-depth-one-ghost.test b/multiagent/test_cases/q3/7-1c-check-depth-one-ghost.test new file mode 100644 index 0000000..1e342bf --- /dev/null +++ b/multiagent/test_cases/q3/7-1c-check-depth-one-ghost.test @@ -0,0 +1,83 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "3" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 10 c1 0 c2 c3 8 + | | | + 0 d1 0 d2 d3 8 + | | | + 0 e1 10 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + g1 g2 g3 + 0 0 8 + +a - max +b - min +c - max +d - min +e - max +f - min + +At depth 3, the evaluation function is called at level g, +so Right should be returned. If your algorithm is returning a +different action, check how you implemented your depth. +""" + +num_agents: "2" + +start_state: "a" +win_states: "g1 g2 g3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 10.0 +c2 0.0 +c3 8.0 +d1 0.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 10.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 0.0 +g3 8.0 +""" + + diff --git a/multiagent/test_cases/q3/7-2a-check-depth-two-ghosts.solution b/multiagent/test_cases/q3/7-2a-check-depth-two-ghosts.solution new file mode 100644 index 0000000..98246da --- /dev/null +++ b/multiagent/test_cases/q3/7-2a-check-depth-two-ghosts.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/7-2a-check-depth-two-ghosts.test. +action: "Left" +generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3" diff --git a/multiagent/test_cases/q3/7-2a-check-depth-two-ghosts.test b/multiagent/test_cases/q3/7-2a-check-depth-two-ghosts.test new file mode 100644 index 0000000..d22beda --- /dev/null +++ b/multiagent/test_cases/q3/7-2a-check-depth-two-ghosts.test @@ -0,0 +1,110 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "1" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 0 c1 0 c2 c3 8 + | | | + 10 d1 0 d2 d3 8 + | | | + 0 e1 0 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + 0 g1 10 g2 g3 8 + | | | + 0 h1 0 h2 h3 8 + | | | + 0 i1 0 i2 i3 8 + | | | + j1 j2 j3 + 0 0 8 + +a - max +b - min +c - min +d - max +e - min +f - min +g - max +h - min +i - min + +At depth 1, the evaluation function is called at level d, +so Left should be returned. If your algorithm is returning a +different action, check how you implemented your depth. 
+""" + +num_agents: "3" + +start_state: "a" +win_states: "j1 j2 j3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +g1 Center h1 +g2 Center h2 +g3 Center h3 +h1 Center i1 +h2 Center i2 +h3 Center i3 +i1 Center j1 +i2 Center j2 +i3 Center j3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 0.0 +c2 0.0 +c3 8.0 +d1 10.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 0.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 10.0 +g3 8.0 +h1 0.0 +h2 0.0 +h3 8.0 +i1 0.0 +i2 0.0 +i3 8.0 +j1 0.0 +j2 0.0 +j3 8.0 +""" + + diff --git a/multiagent/test_cases/q3/7-2b-check-depth-two-ghosts.solution b/multiagent/test_cases/q3/7-2b-check-depth-two-ghosts.solution new file mode 100644 index 0000000..0920b08 --- /dev/null +++ b/multiagent/test_cases/q3/7-2b-check-depth-two-ghosts.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/7-2b-check-depth-two-ghosts.test. +action: "Center" +generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3" diff --git a/multiagent/test_cases/q3/7-2b-check-depth-two-ghosts.test b/multiagent/test_cases/q3/7-2b-check-depth-two-ghosts.test new file mode 100644 index 0000000..f0c4a4c --- /dev/null +++ b/multiagent/test_cases/q3/7-2b-check-depth-two-ghosts.test @@ -0,0 +1,110 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "2" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 0 c1 0 c2 c3 8 + | | | + 10 d1 0 d2 d3 8 + | | | + 0 e1 0 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + 0 g1 10 g2 g3 8 + | | | + 0 h1 0 h2 h3 8 + | | | + 0 i1 0 i2 i3 8 + | | | + j1 j2 j3 + 0 0 8 + +a - max +b - min +c - min +d - max +e - min +f - min +g - max +h - min +i - min + +At depth 2, the evaluation function is called at level g, +so Center should be returned. If your algorithm is returning +a different action, check how you implemented your depth. +""" + +num_agents: "3" + +start_state: "a" +win_states: "j1 j2 j3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +g1 Center h1 +g2 Center h2 +g3 Center h3 +h1 Center i1 +h2 Center i2 +h3 Center i3 +i1 Center j1 +i2 Center j2 +i3 Center j3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 0.0 +c2 0.0 +c3 8.0 +d1 10.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 0.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 10.0 +g3 8.0 +h1 0.0 +h2 0.0 +h3 8.0 +i1 0.0 +i2 0.0 +i3 8.0 +j1 0.0 +j2 0.0 +j3 8.0 +""" + + diff --git a/multiagent/test_cases/q3/7-2c-check-depth-two-ghosts.solution b/multiagent/test_cases/q3/7-2c-check-depth-two-ghosts.solution new file mode 100644 index 0000000..61f84f1 --- /dev/null +++ b/multiagent/test_cases/q3/7-2c-check-depth-two-ghosts.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q3/7-2c-check-depth-two-ghosts.test. 
+action: "Right" +generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3 h1 h2 h3 i1 i2 i3 j1 j2 j3" diff --git a/multiagent/test_cases/q3/7-2c-check-depth-two-ghosts.test b/multiagent/test_cases/q3/7-2c-check-depth-two-ghosts.test new file mode 100644 index 0000000..c8df42c --- /dev/null +++ b/multiagent/test_cases/q3/7-2c-check-depth-two-ghosts.test @@ -0,0 +1,110 @@ +class: "GraphGameTreeTest" +alg: "AlphaBetaAgent" +depth: "3" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 0 c1 0 c2 c3 8 + | | | + 10 d1 0 d2 d3 8 + | | | + 0 e1 0 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + 0 g1 10 g2 g3 8 + | | | + 0 h1 0 h2 h3 8 + | | | + 0 i1 0 i2 i3 8 + | | | + j1 j2 j3 + 0 0 8 + +a - max +b - min +c - min +d - max +e - min +f - min +g - max +h - min +i - min + +At depth 3, the evaluation function is called at level j, +so Right should be returned. If your algorithm is returning +a different action, check how you implemented your depth. +""" + +num_agents: "3" + +start_state: "a" +win_states: "j1 j2 j3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +g1 Center h1 +g2 Center h2 +g3 Center h3 +h1 Center i1 +h2 Center i2 +h3 Center i3 +i1 Center j1 +i2 Center j2 +i3 Center j3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 0.0 +c2 0.0 +c3 8.0 +d1 10.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 0.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 10.0 +g3 8.0 +h1 0.0 +h2 0.0 +h3 8.0 +i1 0.0 +i2 0.0 +i3 8.0 +j1 0.0 +j2 0.0 +j3 8.0 +""" + + diff --git a/multiagent/test_cases/q3/8-pacman-game.solution b/multiagent/test_cases/q3/8-pacman-game.solution new file mode 100644 index 0000000..faa6a40 --- /dev/null +++ b/multiagent/test_cases/q3/8-pacman-game.solution @@ -0,0 +1,444 @@ +optimalActions: """ +[[["West", "East"], 39], [["West", "East"], 27]] +[[["West"], 75], [["West"], 57]] +[[["West"], 95], [["West"], 77]] +[[["West", "North"], 72], [["West", "North"], 54]] +[[["West"], 51], [["West"], 39]] +[[["West", "North"], 76], [["West", "North"], 58]] +[[["West"], 69], [["West"], 51]] +[[["West"], 34], [["West"], 23]] +[[["North"], 55], [["North"], 41]] +[[["North"], 63], [["North"], 32]] +[[["North"], 87], [["North"], 43]] +[[["North"], 69], [["North"], 34]] +[[["Stop", "East"], 116], [["East"], 56]] +[[["East"], 110], [["East"], 52]] +[[["West"], 28], [["West"], 17]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 18], [["East"], 12]] +[[["North"], 28], [["North"], 18]] +[[["North"], 47], [["North"], 30]] +[[["West"], 35], [["West"], 23]] +[[["East"], 28], [["East"], 15]] +[[["North"], 74], [["North"], 48]] +[[["East", "North"], 130], [["East", "North"], 87]] +[[["East", "North"], 213], [["East", "North"], 158]] +[[["North", "South"], 101], [["North", "South"], 73]] +[[["West"], 48], [["West"], 36]] +[[["West"], 60], [["West"], 45]] +[[["West", "Stop"], 49], [["West"], 36]] +[[["West", "Stop", "East"], 69], [["West", "East"], 49]] +[[["West", "Stop", "East"], 61], [["West", "East"], 41]] +[[["Stop", "East", "South"], 55], [["East", "South"], 37]] +[[["Stop", "East", "South"], 28], [["East", "South"], 19]] +[[["Stop", "East", "South"], 34], [["East", "South"], 23]] +[[["Stop", "East", 
"South"], 55], [["East", "South"], 37]] +[[["Stop", "East", "South"], 55], [["East", "South"], 37]] +[[["Stop", "East", "South"], 61], [["East", "South"], 41]] +[[["Stop", "East", "South"], 81], [["East", "South"], 53]] +[[["Stop", "East", "South"], 64], [["East", "South"], 43]] +[[["Stop", "East", "South"], 61], [["East", "South"], 41]] +[[["Stop", "East", "South"], 61], [["East", "South"], 41]] +[[["Stop", "East", "South"], 81], [["East", "South"], 53]] +[[["Stop", "East", "South"], 102], [["East", "South"], 67]] +[[["Stop", "South"], 23], [["South"], 13]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 18], [["East"], 12]] +[[["East", "North"], 28], [["East", "North"], 18]] +[[["East"], 35], [["East"], 22]] +[[["North"], 28], [["North"], 18]] +[[["North"], 38], [["North"], 22]] +[[["East"], 30], [["East"], 20]] +[[["East"], 33], [["East"], 18]] +[[["East"], 18], [["East"], 12]] +[[["East"], 33], [["East"], 23]] +[[["East"], 48], [["East"], 29]] +[[["East"], 49], [["East"], 26]] +[[["East"], 44], [["East"], 29]] +[[["North", "South"], 75], [["North", "South"], 52]] +[[["East", "North"], 113], [["East", "North"], 74]] +[[["East", "North"], 89], [["East", "North"], 65]] +[[["North", "South"], 173], [["North", "South"], 130]] +[[["West", "East"], 54], [["West", "East"], 42]] +[[["West", "Stop", "East"], 130], [["West", "East"], 87]] +[[["Stop", "East", "South"], 58], [["East", "South"], 43]] +[[["Stop", "East"], 110], [["South"], 82]] +[[["East"], 71], [["East"], 45]] +[[["West"], 66], [["West"], 42]] +[[["South"], 70], [["South"], 48]] +[[["South"], 48], [["South"], 37]] +[[["South"], 179], [["South"], 104]] +[[["South"], 68], [["South"], 41]] +[[["West"], 25], [["West"], 18]] +[[["West"], 41], [["West"], 29]] +[[["West"], 14], [["West"], 10]] +[[["West"], 20], [["West"], 14]] +[[["West"], 13], [["West"], 9]] +[[["West"], 13], [["West"], 9]] +[[["West"], 16], [["West"], 12]] +[[["West", "North"], 28], [["West", "North"], 20]] +[[["West"], 30], [["West"], 23]] +[[["West", "Stop", "East", "North"], 66], [["West", "East", "North"], 46]] +[[["West", "Stop", "East"], 126], [["West", "East"], 89]] +[[["West", "Stop", "East"], 30], [["West", "East"], 20]] +[[["Stop", "East", "North"], 67], [["East", "North"], 45]] +[[["Stop", "North"], 52], [["North"], 27]] +[[["North"], 30], [["North"], 19]] +[[["North"], 28], [["North"], 17]] +[[["North"], 17], [["North"], 11]] +[[["North"], 30], [["North"], 19]] +[[["East"], 28], [["East"], 17]] +[[["East"], 17], [["East"], 11]] +[[["East"], 40], [["East"], 29]] +[[["East", "South"], 79], [["East", "South"], 60]] +[[["East", "South"], 90], [["East", "South"], 62]] +[[["South"], 96], [["South"], 61]] +[[["North", "South"], 195], [["North", "South"], 132]] +[[["West"], 80], [["West"], 60]] +[[["West", "Stop", "East", "South"], 83], [["West", "East", "South"], 61]] +[[["West", "Stop", "East", "South"], 90], [["West", "East", "South"], 66]] +[[["West", "Stop", "East"], 134], [["West", "East"], 95]] +[[["West", "Stop", "East"], 82], [["West", "East"], 55]] +[[["Stop", "East", "South"], 142], [["East", "South"], 95]] +[[["Stop", "East", "South"], 95], [["East", "South"], 62]] +[[["Stop", "East", "South"], 128], [["East", "South"], 86]] +[[["Stop", "East", "South"], 82], [["East", "South"], 55]] +[[["Stop", "East", "South"], 85], [["East", "South"], 57]] +[[["Stop", "East", "South"], 182], [["East", 
"South"], 119]] +[[["Stop", "East", "South"], 154], [["East", "South"], 103]] +[[["Stop", "South"], 46], [["South"], 25]] +[[["South"], 28], [["South"], 17]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 18], [["East"], 12]] +[[["East", "North"], 28], [["East", "North"], 18]] +[[["East"], 37], [["East"], 22]] +[[["East", "North"], 34], [["East", "North"], 24]] +[[["East"], 56], [["East"], 29]] +[[["East"], 17], [["East"], 11]] +[[["East"], 26], [["East"], 15]] +[[["East"], 15], [["East"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 18], [["East"], 12]] +[[["East"], 27], [["East"], 17]] +[[["East"], 37], [["East"], 22]] +[[["East", "North"], 37], [["East", "North"], 24]] +[[["East"], 56], [["East"], 29]] +[[["East"], 19], [["East"], 11]] +[[["North"], 26], [["North"], 15]] +[[["North"], 19], [["North"], 11]] +[[["North"], 30], [["North"], 17]] +[[["North"], 30], [["North"], 19]] +[[["West"], 28], [["West"], 19]] +[[["West"], 23], [["West"], 13]] +[[["West", "Stop", "East"], 7], [["West", "East"], 3]] +""" +altDepthActions: """ +[["West", "East"], ["West", "East"], ["West", "East"], ["West", "East"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["Stop", "North"], ["North"]] +[["East"], ["East"], ["Stop", "East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["West"], ["West"], ["West"], ["West"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["West"], ["West"]] +[["East"], ["East"], ["East"], ["East"]] +[["North"], ["North"], ["North"], ["North"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["North", "South"], ["North", "South"], ["North"], ["North"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West", "Stop"], ["West"]] +[["West"], ["West"], ["West", "Stop", "East", "South"], ["West", "East", "South"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", 
"South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "South"], ["South"], ["Stop", "South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East", "North"], ["East", "North"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["North", "South"], ["North", "South"], ["South"], ["South"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["North", "South"], ["North", "South"], ["North"], ["North"]] +[["West", "East"], ["West", "East"], ["East"], ["East"]] +[["West"], ["West"], ["East"], ["East"]] +[["Stop", "East", "South"], ["East", "South"], ["East"], ["East"]] +[["Stop", "East"], ["East"], ["Stop", "East"], ["South"]] +[["East"], ["East"], ["East"], ["East"]] +[["West"], ["West"], ["West"], ["West"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["West", "East"], ["West", "East"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "Stop", "East", "North"], ["West", "East", "North"], ["West", "Stop", "East", "North"], ["West", "East", "North"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "North"], ["East", "North"], ["Stop", "East", "North"], ["East", "North"]] +[["Stop", "North"], ["North"], ["Stop", "North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East", "South"], ["East", "South"], ["East", "South"], ["East", 
"South"]] +[["East", "South"], ["East", "South"], ["East", "South"], ["East", "South"]] +[["South"], ["South"], ["South"], ["South"]] +[["North", "South"], ["North", "South"], ["North", "South"], ["North", "South"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "Stop", "East", "South"], ["West", "East", "South"], ["West", "Stop", "East", "South"], ["West", "East", "South"]] +[["West", "Stop", "East", "South"], ["West", "East", "South"], ["West", "Stop", "East", "South"], ["West", "East", "South"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "South"], ["South"], ["Stop", "South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["East"], ["East"], ["East"], ["East"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East", "North"], ["East", "North"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +""" +partialPlyBugActions: """ +[["West", "East"], ["West", "East"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West", "North"], ["West", "North"]] +[["West"], ["West"]] +[["West", "North"], ["West", "North"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["East"], ["East"]] +[["Stop", "East"], ["East"]] +[["West"], ["West"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["West"], ["West"]] +[["East"], ["East"]] +[["North"], ["North"]] +[["East", "North"], ["East", "North"]] +[["East", "North"], ["East", "North"]] 
+[["North", "South"], ["North", "South"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West", "Stop"], ["West"]] +[["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East", "North"], ["East", "North"]] +[["East"], ["East"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["North", "South"], ["North", "South"]] +[["East", "North"], ["East", "North"]] +[["East", "North"], ["East", "North"]] +[["North", "South"], ["North", "South"]] +[["West", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East"], ["East"]] +[["East"], ["East"]] +[["West"], ["West"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West", "North"], ["West", "North"]] +[["West"], ["West"]] +[["West", "Stop", "East", "North"], ["West", "East", "North"]] +[["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "North"], ["East", "North"]] +[["Stop", "North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East", "South"], ["East", "South"]] +[["East", "South"], ["East", "South"]] +[["South"], ["South"]] +[["North", "South"], ["North", "South"]] +[["West"], ["West"]] +[["West", "Stop", "East", "South"], ["West", "East", "South"]] +[["West", "Stop", "East", "South"], ["West", "East", "South"]] +[["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East", "North"], ["East", "North"]] +[["East"], ["East"]] +[["East", "North"], ["East", "North"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East", "North"], ["East", 
"North"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West", "Stop", "East"], ["West", "East"]] +""" diff --git a/multiagent/test_cases/q3/8-pacman-game.test b/multiagent/test_cases/q3/8-pacman-game.test new file mode 100644 index 0000000..8439209 --- /dev/null +++ b/multiagent/test_cases/q3/8-pacman-game.test @@ -0,0 +1,19 @@ +class: "PacmanGameTreeTest" +alg: "AlphaBetaAgent" +seed: "0" +depth: "2" +max_points: "4" + +# The following specifies the layout to be used +layoutName: "smallClassic" +layout: """ +%%%%%%%%%%%%%%%%%%%% +%......%G G%......% +%.%%...%% %%...%%.% +%.%o.%........%.o%.% +%.%%.%.%%%%%%.%.%%.% +%........P.........% +%%%%%%%%%%%%%%%%%%%% +""" + + diff --git a/multiagent/test_cases/q3/CONFIG b/multiagent/test_cases/q3/CONFIG new file mode 100644 index 0000000..2173a9f --- /dev/null +++ b/multiagent/test_cases/q3/CONFIG @@ -0,0 +1,2 @@ +max_points: "5" +class: "PassAllTestsQuestion" diff --git a/multiagent/test_cases/q4/0-eval-function-lose-states-1.solution b/multiagent/test_cases/q4/0-eval-function-lose-states-1.solution new file mode 100644 index 0000000..416fcee --- /dev/null +++ b/multiagent/test_cases/q4/0-eval-function-lose-states-1.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q4/0-eval-function-lose-states-1.test. +action: "Left" +generated: "lose1 lose2 root" diff --git a/multiagent/test_cases/q4/0-eval-function-lose-states-1.test b/multiagent/test_cases/q4/0-eval-function-lose-states-1.test new file mode 100644 index 0000000..8682a60 --- /dev/null +++ b/multiagent/test_cases/q4/0-eval-function-lose-states-1.test @@ -0,0 +1,30 @@ +class: "GraphGameTreeTest" +alg: "ExpectimaxAgent" +depth: "2" + +diagram: """ + root + / \ + lose1 lose2 + 1 0 + +If your algorithm is returning a different +action, make sure you are calling the +evaluation function on losing states. +""" +num_agents: "2" + +start_state: "root" +win_states: "" +lose_states: "lose1 lose2" + +successors: """ +root Left lose1 +root Right lose2 +""" + +evaluation: """ +lose1 1.0 +lose2 0.0 +""" + diff --git a/multiagent/test_cases/q4/0-eval-function-lose-states-2.solution b/multiagent/test_cases/q4/0-eval-function-lose-states-2.solution new file mode 100644 index 0000000..e8d5b38 --- /dev/null +++ b/multiagent/test_cases/q4/0-eval-function-lose-states-2.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q4/0-eval-function-lose-states-2.test. +action: "Right" +generated: "lose1 lose2 root" diff --git a/multiagent/test_cases/q4/0-eval-function-lose-states-2.test b/multiagent/test_cases/q4/0-eval-function-lose-states-2.test new file mode 100644 index 0000000..3f07534 --- /dev/null +++ b/multiagent/test_cases/q4/0-eval-function-lose-states-2.test @@ -0,0 +1,30 @@ +class: "GraphGameTreeTest" +alg: "ExpectimaxAgent" +depth: "2" + +diagram: """ + root + / \ + lose1 lose2 + 0 1 + +If your algorithm is returning a different +action, make sure you are calling the +evaluation function on losing states. 
+""" +num_agents: "2" + +start_state: "root" +win_states: "" +lose_states: "lose1 lose2" + +successors: """ +root Left lose1 +root Right lose2 +""" + +evaluation: """ +lose1 0.0 +lose2 1.0 +""" + diff --git a/multiagent/test_cases/q4/0-eval-function-win-states-1.solution b/multiagent/test_cases/q4/0-eval-function-win-states-1.solution new file mode 100644 index 0000000..ce5d2f2 --- /dev/null +++ b/multiagent/test_cases/q4/0-eval-function-win-states-1.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q4/0-eval-function-win-states-1.test. +action: "Left" +generated: "root win1 win2" diff --git a/multiagent/test_cases/q4/0-eval-function-win-states-1.test b/multiagent/test_cases/q4/0-eval-function-win-states-1.test new file mode 100644 index 0000000..654af6c --- /dev/null +++ b/multiagent/test_cases/q4/0-eval-function-win-states-1.test @@ -0,0 +1,30 @@ +class: "GraphGameTreeTest" +alg: "ExpectimaxAgent" +depth: "2" + +diagram: """ + root + / \ + win1 win2 + 1 0 + +If your algorithm is returning a different +action, make sure you are calling the +evaluation function on winning states. +""" +num_agents: "2" + +start_state: "root" +win_states: "win1 win2" +lose_states: "" + +successors: """ +root Left win1 +root Right win2 +""" + +evaluation: """ +win1 1.0 +win2 0.0 +""" + diff --git a/multiagent/test_cases/q4/0-eval-function-win-states-2.solution b/multiagent/test_cases/q4/0-eval-function-win-states-2.solution new file mode 100644 index 0000000..3c65760 --- /dev/null +++ b/multiagent/test_cases/q4/0-eval-function-win-states-2.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q4/0-eval-function-win-states-2.test. +action: "Right" +generated: "root win1 win2" diff --git a/multiagent/test_cases/q4/0-eval-function-win-states-2.test b/multiagent/test_cases/q4/0-eval-function-win-states-2.test new file mode 100644 index 0000000..93fac73 --- /dev/null +++ b/multiagent/test_cases/q4/0-eval-function-win-states-2.test @@ -0,0 +1,30 @@ +class: "GraphGameTreeTest" +alg: "ExpectimaxAgent" +depth: "2" + +diagram: """ + root + / \ + win1 win2 + 0 1 + +If your algorithm is returning a different +action, make sure you are calling the +evaluation function on winning states. +""" +num_agents: "2" + +start_state: "root" +win_states: "win1 win2" +lose_states: "" + +successors: """ +root Left win1 +root Right win2 +""" + +evaluation: """ +win1 0.0 +win2 1.0 +""" + diff --git a/multiagent/test_cases/q4/0-expectimax1.solution b/multiagent/test_cases/q4/0-expectimax1.solution new file mode 100644 index 0000000..a870247 --- /dev/null +++ b/multiagent/test_cases/q4/0-expectimax1.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q4/0-expectimax1.test. 
+action: "Left" +generated: "A B C D E F G H I exp1 exp2 exp3 max" diff --git a/multiagent/test_cases/q4/0-expectimax1.test b/multiagent/test_cases/q4/0-expectimax1.test new file mode 100644 index 0000000..d0421be --- /dev/null +++ b/multiagent/test_cases/q4/0-expectimax1.test @@ -0,0 +1,48 @@ +class: "GraphGameTreeTest" +alg: "ExpectimaxAgent" +depth: "2" + +# Tree adapted from lecture 6 slides +diagram: """ + max + /-/ | \--\ + / | \ + / | \ + exp1 exp2 exp3 + /|\ /|\ /|\ + / | \ / | \ / | \ +A B C D E F G H I +3 12 8 2 4 6 14 5 2 +""" +num_agents: "2" + +start_state: "max" +win_states: "A B C D E F G H I" +lose_states: "" + +successors: """ +max Left exp1 +max Center exp2 +max Right exp3 +exp1 Left A +exp1 Center B +exp1 Right C +exp2 Left D +exp2 Center E +exp2 Right F +exp3 Left G +exp3 Center H +exp3 Right I +""" + +evaluation: """ +A 3.0 +B 12.0 +C 8.0 +D 2.0 +E 4.0 +F 6.0 +G 14.0 +H 5.0 +I 2.0 +""" diff --git a/multiagent/test_cases/q4/1-expectimax2.solution b/multiagent/test_cases/q4/1-expectimax2.solution new file mode 100644 index 0000000..926e035 --- /dev/null +++ b/multiagent/test_cases/q4/1-expectimax2.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q4/1-expectimax2.test. +action: "Left" +generated: "A B C D E F G H I exp1 exp2 exp3 max" diff --git a/multiagent/test_cases/q4/1-expectimax2.test b/multiagent/test_cases/q4/1-expectimax2.test new file mode 100644 index 0000000..b4f7858 --- /dev/null +++ b/multiagent/test_cases/q4/1-expectimax2.test @@ -0,0 +1,48 @@ +class: "GraphGameTreeTest" +alg: "ExpectimaxAgent" +depth: "2" + +# Tree from lecture 7 slides +diagram: """ + max + /-/ | \--\ + / | \ + / | \ + exp1 exp2 exp3 + /|\ /|\ /|\ + / | \ / | \ / | \ +A B C D E F G H I +3 12 9 2 4 6 15 6 0 +""" +num_agents: "2" + +start_state: "max" +win_states: "A B C D E F G H I" +lose_states: "" + +successors: """ +max Left exp1 +max Center exp2 +max Right exp3 +exp1 Left A +exp1 Center B +exp1 Right C +exp2 Left D +exp2 Center E +exp2 Right F +exp3 Left G +exp3 Center H +exp3 Right I +""" + +evaluation: """ +A 3.0 +B 12.0 +C 9.0 +D 2.0 +E 4.0 +F 6.0 +G 15.0 +H 6.0 +I 0.0 +""" diff --git a/multiagent/test_cases/q4/2-one-ghost-3level.solution b/multiagent/test_cases/q4/2-one-ghost-3level.solution new file mode 100644 index 0000000..17bfb04 --- /dev/null +++ b/multiagent/test_cases/q4/2-one-ghost-3level.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q4/2-one-ghost-3level.test. 
+action: "Left" +generated: "a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7 d8" diff --git a/multiagent/test_cases/q4/2-one-ghost-3level.test b/multiagent/test_cases/q4/2-one-ghost-3level.test new file mode 100644 index 0000000..3d206e8 --- /dev/null +++ b/multiagent/test_cases/q4/2-one-ghost-3level.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "ExpectimaxAgent" +depth: "3" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ / \ + c1 c2 c3 c4 + / \ / \ / \ / \ + d1 d2 d3 d4 d5 d6 d7 d8 + 3 9 10 6 4 7 0 5 + +a - max +b - exp +c - max +""" +num_agents: "2" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Left c3 +b2 Right c4 +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +""" + +evaluation: """ +d1 3.0 +d2 9.0 +d3 10.0 +d4 6.0 +d5 4.0 +d6 7.0 +d7 0.0 +d8 5.0 +""" diff --git a/multiagent/test_cases/q4/3-one-ghost-4level.solution b/multiagent/test_cases/q4/3-one-ghost-4level.solution new file mode 100644 index 0000000..d31415b --- /dev/null +++ b/multiagent/test_cases/q4/3-one-ghost-4level.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q4/3-one-ghost-4level.test. +action: "Right" +generated: "A B C D E F G H I J K L M N O P a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7 d8" diff --git a/multiagent/test_cases/q4/3-one-ghost-4level.test b/multiagent/test_cases/q4/3-one-ghost-4level.test new file mode 100644 index 0000000..b928139 --- /dev/null +++ b/multiagent/test_cases/q4/3-one-ghost-4level.test @@ -0,0 +1,79 @@ +class: "GraphGameTreeTest" +alg: "ExpectimaxAgent" +depth: "4" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ / \ + c1 c2 c3 c4 + / \ / \ / \ / \ + d1 d2 d3 d4 d5 d6 d7 d8 +/ \ / \ / \ / \ / \ / \ / \ / \ +A B C D E F G H I J K L M N O P +3 13 5 9 10 11 6 8 1 0 4 7 12 15 2 14 + +a - max +b - exp +c - max +d - exp +""" +num_agents: "2" + +start_state: "a" +win_states: "A B C D E F G H I J K L M N O P" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Left c3 +b2 Right c4 +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +d1 Left A +d1 Right B +d2 Left C +d2 Right D +d3 Left E +d3 Right F +d4 Left G +d4 Right H +d5 Left I +d5 Right J +d6 Left K +d6 Right L +d7 Left M +d7 Right N +d8 Left O +d8 Right P +""" + +evaluation: """ +A 3.0 +B 13.0 +C 5.0 +D 9.0 +E 10.0 +F 11.0 +G 6.0 +H 8.0 +I 1.0 +J 0.0 +K 4.0 +L 7.0 +M 12.0 +N 15.0 +O 2.0 +P 14.0 +""" diff --git a/multiagent/test_cases/q4/4-two-ghosts-3level.solution b/multiagent/test_cases/q4/4-two-ghosts-3level.solution new file mode 100644 index 0000000..4dae117 --- /dev/null +++ b/multiagent/test_cases/q4/4-two-ghosts-3level.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q4/4-two-ghosts-3level.test. 
+action: "Left" +generated: "a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7 d8" diff --git a/multiagent/test_cases/q4/4-two-ghosts-3level.test b/multiagent/test_cases/q4/4-two-ghosts-3level.test new file mode 100644 index 0000000..0e58b52 --- /dev/null +++ b/multiagent/test_cases/q4/4-two-ghosts-3level.test @@ -0,0 +1,52 @@ +class: "GraphGameTreeTest" +alg: "ExpectimaxAgent" +depth: "3" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ / \ + c1 c2 c3 c4 + / \ / \ / \ / \ + d1 d2 d3 d4 d5 d6 d7 d8 + 3 9 10 6 4 7 0 5 + +a - max +b - exp +c - exp +""" +num_agents: "3" + +start_state: "a" +win_states: "d1 d2 d3 d4 d5 d6 d7 d8" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Left c3 +b2 Right c4 +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +""" + +evaluation: """ +d1 3.0 +d2 9.0 +d3 10.0 +d4 6.0 +d5 4.0 +d6 7.0 +d7 0.0 +d8 5.0 +""" diff --git a/multiagent/test_cases/q4/5-two-ghosts-4level.solution b/multiagent/test_cases/q4/5-two-ghosts-4level.solution new file mode 100644 index 0000000..8453f7a --- /dev/null +++ b/multiagent/test_cases/q4/5-two-ghosts-4level.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q4/5-two-ghosts-4level.test. +action: "Left" +generated: "A B C D E F G H I J K L M N O P a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7 d8" diff --git a/multiagent/test_cases/q4/5-two-ghosts-4level.test b/multiagent/test_cases/q4/5-two-ghosts-4level.test new file mode 100644 index 0000000..b8b74a1 --- /dev/null +++ b/multiagent/test_cases/q4/5-two-ghosts-4level.test @@ -0,0 +1,79 @@ +class: "GraphGameTreeTest" +alg: "ExpectimaxAgent" +depth: "4" + +diagram: """ + /-----a------\ + / \ + / \ + b1 b2 + / \ / \ + c1 c2 c3 c4 + / \ / \ / \ / \ + d1 d2 d3 d4 d5 d6 d7 d8 +/ \ / \ / \ / \ / \ / \ / \ / \ +A B C D E F G H I J K L M N O P +3 13 5 9 10 11 6 8 1 0 4 7 12 15 2 14 + +a - max +b - exp +c - exp +d - max +""" +num_agents: "3" + +start_state: "a" +win_states: "A B C D E F G H I J K L M N O P" +lose_states: "" + +successors: """ +a Left b1 +a Right b2 +b1 Left c1 +b1 Right c2 +b2 Left c3 +b2 Right c4 +c1 Left d1 +c1 Right d2 +c2 Left d3 +c2 Right d4 +c3 Left d5 +c3 Right d6 +c4 Left d7 +c4 Right d8 +d1 Left A +d1 Right B +d2 Left C +d2 Right D +d3 Left E +d3 Right F +d4 Left G +d4 Right H +d5 Left I +d5 Right J +d6 Left K +d6 Right L +d7 Left M +d7 Right N +d8 Left O +d8 Right P +""" + +evaluation: """ +A 3.0 +B 13.0 +C 5.0 +D 9.0 +E 10.0 +F 11.0 +G 6.0 +H 8.0 +I 1.0 +J 0.0 +K 4.0 +L 7.0 +M 12.0 +N 15.0 +O 2.0 +P 14.0 +""" diff --git a/multiagent/test_cases/q4/6-1a-check-depth-one-ghost.solution b/multiagent/test_cases/q4/6-1a-check-depth-one-ghost.solution new file mode 100644 index 0000000..65223dc --- /dev/null +++ b/multiagent/test_cases/q4/6-1a-check-depth-one-ghost.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q4/6-1a-check-depth-one-ghost.test. 
+action: "Left" +generated: "a b1 b2 b3 c1 c2 c3" diff --git a/multiagent/test_cases/q4/6-1a-check-depth-one-ghost.test b/multiagent/test_cases/q4/6-1a-check-depth-one-ghost.test new file mode 100644 index 0000000..e43289d --- /dev/null +++ b/multiagent/test_cases/q4/6-1a-check-depth-one-ghost.test @@ -0,0 +1,83 @@ +class: "GraphGameTreeTest" +alg: "ExpectimaxAgent" +depth: "1" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 10 c1 0 c2 c3 8 + | | | + 0 d1 0 d2 d3 8 + | | | + 0 e1 10 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + g1 g2 g3 + 0 0 8 + +a - max +b - min +c - max +d - min +e - max +f - min + +At depth 1, the evaluation function is called at level c, +so Left should be returned. If your algorithm is returning a +different action, check how you implemented your depth. +""" + +num_agents: "2" + +start_state: "a" +win_states: "g1 g2 g3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 10.0 +c2 0.0 +c3 8.0 +d1 0.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 10.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 0.0 +g3 8.0 +""" + + diff --git a/multiagent/test_cases/q4/6-1b-check-depth-one-ghost.solution b/multiagent/test_cases/q4/6-1b-check-depth-one-ghost.solution new file mode 100644 index 0000000..ed57c6d --- /dev/null +++ b/multiagent/test_cases/q4/6-1b-check-depth-one-ghost.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q4/6-1b-check-depth-one-ghost.test. +action: "Center" +generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3" diff --git a/multiagent/test_cases/q4/6-1b-check-depth-one-ghost.test b/multiagent/test_cases/q4/6-1b-check-depth-one-ghost.test new file mode 100644 index 0000000..8ae747f --- /dev/null +++ b/multiagent/test_cases/q4/6-1b-check-depth-one-ghost.test @@ -0,0 +1,83 @@ +class: "GraphGameTreeTest" +alg: "ExpectimaxAgent" +depth: "2" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 10 c1 0 c2 c3 8 + | | | + 0 d1 0 d2 d3 8 + | | | + 0 e1 10 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + g1 g2 g3 + 0 0 8 + +a - max +b - min +c - max +d - min +e - max +f - min + +At depth 2, the evaluation function is called at level e, +so Center should be returned. If your algorithm is returning a +different action, check how you implemented your depth. +""" + +num_agents: "2" + +start_state: "a" +win_states: "g1 g2 g3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 10.0 +c2 0.0 +c3 8.0 +d1 0.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 10.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 0.0 +g3 8.0 +""" + + diff --git a/multiagent/test_cases/q4/6-1c-check-depth-one-ghost.solution b/multiagent/test_cases/q4/6-1c-check-depth-one-ghost.solution new file mode 100644 index 0000000..f5b16c4 --- /dev/null +++ b/multiagent/test_cases/q4/6-1c-check-depth-one-ghost.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q4/6-1c-check-depth-one-ghost.test. 
+action: "Right" +generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3" diff --git a/multiagent/test_cases/q4/6-1c-check-depth-one-ghost.test b/multiagent/test_cases/q4/6-1c-check-depth-one-ghost.test new file mode 100644 index 0000000..bf5779a --- /dev/null +++ b/multiagent/test_cases/q4/6-1c-check-depth-one-ghost.test @@ -0,0 +1,83 @@ +class: "GraphGameTreeTest" +alg: "ExpectimaxAgent" +depth: "3" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 10 c1 0 c2 c3 8 + | | | + 0 d1 0 d2 d3 8 + | | | + 0 e1 10 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + g1 g2 g3 + 0 0 8 + +a - max +b - min +c - max +d - min +e - max +f - min + +At depth 3, the evaluation function is called at level g, +so Right should be returned. If your algorithm is returning a +different action, check how you implemented your depth. +""" + +num_agents: "2" + +start_state: "a" +win_states: "g1 g2 g3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 10.0 +c2 0.0 +c3 8.0 +d1 0.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 10.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 0.0 +g3 8.0 +""" + + diff --git a/multiagent/test_cases/q4/6-2a-check-depth-two-ghosts.solution b/multiagent/test_cases/q4/6-2a-check-depth-two-ghosts.solution new file mode 100644 index 0000000..57a4290 --- /dev/null +++ b/multiagent/test_cases/q4/6-2a-check-depth-two-ghosts.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q4/6-2a-check-depth-two-ghosts.test. +action: "Left" +generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3" diff --git a/multiagent/test_cases/q4/6-2a-check-depth-two-ghosts.test b/multiagent/test_cases/q4/6-2a-check-depth-two-ghosts.test new file mode 100644 index 0000000..1465fb2 --- /dev/null +++ b/multiagent/test_cases/q4/6-2a-check-depth-two-ghosts.test @@ -0,0 +1,110 @@ +class: "GraphGameTreeTest" +alg: "ExpectimaxAgent" +depth: "1" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 0 c1 0 c2 c3 8 + | | | + 10 d1 0 d2 d3 8 + | | | + 0 e1 0 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + 0 g1 10 g2 g3 8 + | | | + 0 h1 0 h2 h3 8 + | | | + 0 i1 0 i2 i3 8 + | | | + j1 j2 j3 + 0 0 8 + +a - max +b - min +c - min +d - max +e - min +f - min +g - max +h - min +i - min + +At depth 1, the evaluation function is called at level d, +so Left should be returned. If your algorithm is returning a +different action, check how you implemented your depth. 
+""" + +num_agents: "3" + +start_state: "a" +win_states: "j1 j2 j3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +g1 Center h1 +g2 Center h2 +g3 Center h3 +h1 Center i1 +h2 Center i2 +h3 Center i3 +i1 Center j1 +i2 Center j2 +i3 Center j3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 0.0 +c2 0.0 +c3 8.0 +d1 10.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 0.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 10.0 +g3 8.0 +h1 0.0 +h2 0.0 +h3 8.0 +i1 0.0 +i2 0.0 +i3 8.0 +j1 0.0 +j2 0.0 +j3 8.0 +""" + + diff --git a/multiagent/test_cases/q4/6-2b-check-depth-two-ghosts.solution b/multiagent/test_cases/q4/6-2b-check-depth-two-ghosts.solution new file mode 100644 index 0000000..65a48d5 --- /dev/null +++ b/multiagent/test_cases/q4/6-2b-check-depth-two-ghosts.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q4/6-2b-check-depth-two-ghosts.test. +action: "Center" +generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3" diff --git a/multiagent/test_cases/q4/6-2b-check-depth-two-ghosts.test b/multiagent/test_cases/q4/6-2b-check-depth-two-ghosts.test new file mode 100644 index 0000000..4585e5a --- /dev/null +++ b/multiagent/test_cases/q4/6-2b-check-depth-two-ghosts.test @@ -0,0 +1,110 @@ +class: "GraphGameTreeTest" +alg: "ExpectimaxAgent" +depth: "2" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 0 c1 0 c2 c3 8 + | | | + 10 d1 0 d2 d3 8 + | | | + 0 e1 0 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + 0 g1 10 g2 g3 8 + | | | + 0 h1 0 h2 h3 8 + | | | + 0 i1 0 i2 i3 8 + | | | + j1 j2 j3 + 0 0 8 + +a - max +b - min +c - min +d - max +e - min +f - min +g - max +h - min +i - min + +At depth 2, the evaluation function is called at level g, +so Center should be returned. If your algorithm is returning +a different action, check how you implemented your depth. +""" + +num_agents: "3" + +start_state: "a" +win_states: "j1 j2 j3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +g1 Center h1 +g2 Center h2 +g3 Center h3 +h1 Center i1 +h2 Center i2 +h3 Center i3 +i1 Center j1 +i2 Center j2 +i3 Center j3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 0.0 +c2 0.0 +c3 8.0 +d1 10.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 0.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 10.0 +g3 8.0 +h1 0.0 +h2 0.0 +h3 8.0 +i1 0.0 +i2 0.0 +i3 8.0 +j1 0.0 +j2 0.0 +j3 8.0 +""" + + diff --git a/multiagent/test_cases/q4/6-2c-check-depth-two-ghosts.solution b/multiagent/test_cases/q4/6-2c-check-depth-two-ghosts.solution new file mode 100644 index 0000000..2047966 --- /dev/null +++ b/multiagent/test_cases/q4/6-2c-check-depth-two-ghosts.solution @@ -0,0 +1,3 @@ +# This is the solution file for test_cases/q4/6-2c-check-depth-two-ghosts.test. 
+action: "Right" +generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3 h1 h2 h3 i1 i2 i3 j1 j2 j3" diff --git a/multiagent/test_cases/q4/6-2c-check-depth-two-ghosts.test b/multiagent/test_cases/q4/6-2c-check-depth-two-ghosts.test new file mode 100644 index 0000000..1b10950 --- /dev/null +++ b/multiagent/test_cases/q4/6-2c-check-depth-two-ghosts.test @@ -0,0 +1,110 @@ +class: "GraphGameTreeTest" +alg: "ExpectimaxAgent" +depth: "3" + +diagram: """ + a + /-/ | \--\ + / | \ + 0 b1 0 b2 b3 8 + | | | + 0 c1 0 c2 c3 8 + | | | + 10 d1 0 d2 d3 8 + | | | + 0 e1 0 e2 e3 8 + | | | + 0 f1 0 f2 f3 8 + | | | + 0 g1 10 g2 g3 8 + | | | + 0 h1 0 h2 h3 8 + | | | + 0 i1 0 i2 i3 8 + | | | + j1 j2 j3 + 0 0 8 + +a - max +b - min +c - min +d - max +e - min +f - min +g - max +h - min +i - min + +At depth 3, the evaluation function is called at level j, +so Right should be returned. If your algorithm is returning +a different action, check how you implemented your depth. +""" + +num_agents: "3" + +start_state: "a" +win_states: "j1 j2 j3" +lose_states: "" + +successors: """ +a Left b1 +a Center b2 +a Right b3 +b1 Center c1 +b2 Center c2 +b3 Center c3 +c1 Center d1 +c2 Center d2 +c3 Center d3 +d1 Center e1 +d2 Center e2 +d3 Center e3 +e1 Center f1 +e2 Center f2 +e3 Center f3 +f1 Center g1 +f2 Center g2 +f3 Center g3 +g1 Center h1 +g2 Center h2 +g3 Center h3 +h1 Center i1 +h2 Center i2 +h3 Center i3 +i1 Center j1 +i2 Center j2 +i3 Center j3 +""" + + +evaluation: """ +b1 0.0 +b2 0.0 +b3 8.0 +c1 0.0 +c2 0.0 +c3 8.0 +d1 10.0 +d2 0.0 +d3 8.0 +e1 0.0 +e2 0.0 +e3 8.0 +f1 0.0 +f2 0.0 +f3 8.0 +g1 0.0 +g2 10.0 +g3 8.0 +h1 0.0 +h2 0.0 +h3 8.0 +i1 0.0 +i2 0.0 +i3 8.0 +j1 0.0 +j2 0.0 +j3 8.0 +""" + + diff --git a/multiagent/test_cases/q4/7-pacman-game.solution b/multiagent/test_cases/q4/7-pacman-game.solution new file mode 100644 index 0000000..7c611c8 --- /dev/null +++ b/multiagent/test_cases/q4/7-pacman-game.solution @@ -0,0 +1,444 @@ +optimalActions: """ +[[["West", "East"], 59], [["West", "East"], 35]] +[[["West"], 190], [["West"], 127]] +[[["West"], 190], [["West"], 135]] +[[["West", "North"], 120], [["West", "North"], 82]] +[[["West"], 77], [["West"], 57]] +[[["West", "North"], 143], [["West", "North"], 97]] +[[["West"], 155], [["West"], 110]] +[[["West"], 40], [["West"], 27]] +[[["North"], 64], [["North"], 43]] +[[["North"], 85], [["North"], 57]] +[[["North"], 106], [["North"], 71]] +[[["North"], 97], [["North"], 65]] +[[["East"], 154], [["East"], 103]] +[[["Stop"], 130], [["West"], 85]] +[[["West"], 30], [["West"], 17]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 18], [["East"], 12]] +[[["North"], 29], [["North"], 18]] +[[["North"], 50], [["North"], 31]] +[[["West"], 55], [["West"], 36]] +[[["East"], 29], [["East"], 16]] +[[["North"], 89], [["North"], 61]] +[[["East", "North"], 161], [["East", "North"], 121]] +[[["East", "North"], 221], [["East", "North"], 166]] +[[["North", "South"], 105], [["North", "South"], 77]] +[[["West"], 69], [["West"], 51]] +[[["West"], 94], [["West"], 69]] +[[["West", "Stop"], 57], [["West"], 42]] +[[["West", "Stop", "East"], 69], [["West", "East"], 49]] +[[["West", "Stop", "East"], 61], [["West", "East"], 41]] +[[["Stop", "East", "South"], 55], [["East", "South"], 37]] +[[["Stop", "East", "South"], 28], [["East", "South"], 19]] +[[["Stop", "East", "South"], 34], [["East", "South"], 23]] +[[["Stop", "East", 
"South"], 55], [["East", "South"], 37]] +[[["Stop", "East", "South"], 55], [["East", "South"], 37]] +[[["Stop", "East", "South"], 61], [["East", "South"], 41]] +[[["Stop", "East", "South"], 85], [["East", "South"], 57]] +[[["Stop", "East", "South"], 64], [["East", "South"], 43]] +[[["Stop", "East", "South"], 61], [["East", "South"], 41]] +[[["Stop", "East", "South"], 61], [["East", "South"], 41]] +[[["Stop", "East", "South"], 85], [["East", "South"], 57]] +[[["Stop", "East", "South"], 102], [["East", "South"], 67]] +[[["Stop", "South"], 23], [["South"], 13]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 18], [["East"], 12]] +[[["East", "North"], 29], [["East", "North"], 18]] +[[["East"], 38], [["East"], 22]] +[[["North"], 29], [["North"], 18]] +[[["North"], 38], [["North"], 22]] +[[["East"], 33], [["East"], 22]] +[[["East"], 37], [["East"], 18]] +[[["East"], 18], [["East"], 12]] +[[["East"], 37], [["East"], 26]] +[[["East"], 69], [["East"], 41]] +[[["East"], 56], [["East"], 26]] +[[["East"], 44], [["East"], 29]] +[[["North", "South"], 83], [["North", "South"], 52]] +[[["East", "North"], 121], [["East", "North"], 74]] +[[["East", "North"], 97], [["East", "North"], 73]] +[[["North", "South"], 173], [["North", "South"], 130]] +[[["West", "East"], 90], [["West", "East"], 66]] +[[["West", "Stop", "East"], 161], [["West", "East"], 118]] +[[["Stop", "East", "South"], 58], [["East", "South"], 43]] +[[["Stop", "East"], 120], [["East"], 85]] +[[["East"], 78], [["East"], 45]] +[[["West"], 77], [["West"], 42]] +[[["South"], 83], [["South"], 48]] +[[["South"], 49], [["South"], 37]] +[[["South"], 185], [["South"], 104]] +[[["South"], 68], [["South"], 41]] +[[["West"], 30], [["West"], 18]] +[[["West"], 56], [["West"], 29]] +[[["West"], 14], [["West"], 10]] +[[["West"], 20], [["West"], 14]] +[[["West"], 13], [["West"], 9]] +[[["West"], 13], [["West"], 9]] +[[["West"], 16], [["West"], 12]] +[[["West", "North"], 30], [["West", "North"], 20]] +[[["West"], 38], [["West"], 23]] +[[["West", "Stop", "East", "North"], 70], [["West", "East", "North"], 46]] +[[["West", "Stop", "East"], 128], [["West", "East"], 89]] +[[["West", "Stop", "East"], 31], [["West", "East"], 20]] +[[["Stop", "East", "North"], 69], [["East", "North"], 45]] +[[["Stop", "North"], 58], [["North"], 31]] +[[["North"], 34], [["North"], 19]] +[[["North"], 30], [["North"], 17]] +[[["North"], 19], [["North"], 11]] +[[["North"], 34], [["North"], 19]] +[[["East"], 30], [["East"], 17]] +[[["East"], 19], [["East"], 11]] +[[["East"], 44], [["East"], 29]] +[[["East", "South"], 87], [["East", "South"], 60]] +[[["East", "South"], 108], [["East", "South"], 62]] +[[["South"], 120], [["South"], 61]] +[[["North", "South"], 209], [["North", "South"], 132]] +[[["West"], 108], [["West"], 60]] +[[["West", "Stop", "East", "South"], 83], [["West", "East", "South"], 61]] +[[["West", "Stop", "East", "South"], 90], [["West", "East", "South"], 66]] +[[["West", "Stop", "East"], 134], [["West", "East"], 95]] +[[["West", "Stop", "East"], 82], [["West", "East"], 55]] +[[["Stop", "East", "South"], 142], [["East", "South"], 95]] +[[["Stop", "East", "South"], 98], [["East", "South"], 65]] +[[["Stop", "East", "South"], 128], [["East", "South"], 86]] +[[["Stop", "East", "South"], 82], [["East", "South"], 55]] +[[["Stop", "East", "South"], 85], [["East", "South"], 57]] +[[["Stop", "East", "South"], 190], 
[["East", "South"], 127]] +[[["Stop", "East", "South"], 158], [["East", "South"], 103]] +[[["Stop", "South"], 50], [["South"], 27]] +[[["South"], 30], [["South"], 17]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["South"], 15], [["South"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 18], [["East"], 12]] +[[["East", "North"], 29], [["East", "North"], 18]] +[[["East"], 37], [["East"], 22]] +[[["East", "North"], 41], [["East", "North"], 24]] +[[["East"], 59], [["East"], 29]] +[[["East"], 19], [["East"], 11]] +[[["East"], 26], [["East"], 15]] +[[["East"], 15], [["East"], 9]] +[[["East"], 15], [["East"], 9]] +[[["East"], 18], [["East"], 12]] +[[["East"], 29], [["East"], 18]] +[[["East"], 37], [["East"], 22]] +[[["East", "North"], 41], [["East", "North"], 24]] +[[["East"], 59], [["East"], 29]] +[[["East"], 19], [["East"], 11]] +[[["North"], 26], [["North"], 15]] +[[["North"], 19], [["North"], 11]] +[[["North"], 30], [["North"], 17]] +[[["North"], 34], [["North"], 19]] +[[["West"], 34], [["West"], 19]] +[[["West"], 25], [["West"], 13]] +[[["West", "Stop", "East"], 7], [["West", "East"], 3]] +""" +altDepthActions: """ +[["West", "East"], ["West", "East"], ["West", "East"], ["West", "East"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["East"], ["East"], ["East"], ["East"]] +[["West", "Stop"], ["West"], ["Stop"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["West"], ["West"]] +[["East"], ["East"], ["East"], ["East"]] +[["North"], ["North"], ["North"], ["North"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["East", "North"], ["East", "North"], ["East"], ["East"]] +[["North", "South"], ["North", "South"], ["North"], ["North"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West", "Stop"], ["West"]] +[["West"], ["West"], ["West", "Stop", "East", "South"], ["West", "East", "South"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", 
"South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "South"], ["South"], ["Stop", "South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East", "North"], ["East", "North"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["North", "South"], ["North", "South"], ["South"], ["South"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["North", "South"], ["North", "South"], ["North"], ["North"]] +[["West", "East"], ["West", "East"], ["East"], ["East"]] +[["West"], ["West"], ["East"], ["East"]] +[["Stop", "East", "South"], ["East", "South"], ["East"], ["East"]] +[["Stop", "East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["West"], ["West"], ["West"], ["West"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["West", "East"], ["West", "East"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "Stop", "East", "North"], ["West", "East", "North"], ["West", "Stop", "East", "North"], ["West", "East", "North"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "North"], ["East", "North"], ["Stop", "East", "North"], ["East", "North"]] +[["Stop", "North"], ["North"], ["Stop", "North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East", "South"], ["East", "South"], ["East", "South"], ["East", "South"]] +[["East", "South"], 
["East", "South"], ["East", "South"], ["East", "South"]] +[["South"], ["South"], ["South"], ["South"]] +[["North", "South"], ["North", "South"], ["North", "South"], ["North", "South"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "Stop", "East", "South"], ["West", "East", "South"], ["West", "Stop", "East", "South"], ["West", "East", "South"]] +[["West", "Stop", "East", "South"], ["West", "East", "South"], ["West", "Stop", "East", "South"], ["West", "East", "South"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]] +[["Stop", "South"], ["South"], ["Stop", "South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["South"], ["South"], ["South"], ["South"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["East"], ["East"], ["East"], ["East"]] +[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East", "North"], ["East", "North"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["East"], ["East"], ["East"], ["East"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["North"], ["North"], ["North"], ["North"]] +[["West"], ["West"], ["West"], ["West"]] +[["West"], ["West"], ["West"], ["West"]] +[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]] +""" +partialPlyBugActions: """ +[["West", "East"], ["West", "East"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West", "North"], ["West", "North"]] +[["West"], ["West"]] +[["West", "North"], ["West", "North"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["East"], ["East"]] +[["Stop"], ["West"]] +[["West"], ["West"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["West"], ["West"]] +[["East"], ["East"]] +[["North"], ["North"]] +[["East", "North"], ["East", "North"]] +[["East", "North"], ["East", "North"]] +[["North", "South"], ["North", "South"]] 
+[["West"], ["West"]] +[["West"], ["West"]] +[["West", "Stop"], ["West"]] +[["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East", "North"], ["East", "North"]] +[["East"], ["East"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["North", "South"], ["North", "South"]] +[["East", "North"], ["East", "North"]] +[["East", "North"], ["East", "North"]] +[["North", "South"], ["North", "South"]] +[["West", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East"], ["East"]] +[["East"], ["East"]] +[["West"], ["West"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West", "North"], ["West", "North"]] +[["West"], ["West"]] +[["West", "Stop", "East", "North"], ["West", "East", "North"]] +[["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "North"], ["East", "North"]] +[["Stop", "North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East", "South"], ["East", "South"]] +[["East", "South"], ["East", "South"]] +[["South"], ["South"]] +[["North", "South"], ["North", "South"]] +[["West"], ["West"]] +[["West", "Stop", "East", "South"], ["West", "East", "South"]] +[["West", "Stop", "East", "South"], ["West", "East", "South"]] +[["West", "Stop", "East"], ["West", "East"]] +[["West", "Stop", "East"], ["West", "East"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "East", "South"], ["East", "South"]] +[["Stop", "South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["South"], ["South"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East", "North"], ["East", "North"]] +[["East"], ["East"]] +[["East", "North"], ["East", "North"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East"], ["East"]] +[["East", "North"], ["East", "North"]] +[["East"], ["East"]] +[["East"], 
["East"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["North"], ["North"]] +[["West"], ["West"]] +[["West"], ["West"]] +[["West", "Stop", "East"], ["West", "East"]] +""" diff --git a/multiagent/test_cases/q4/7-pacman-game.test b/multiagent/test_cases/q4/7-pacman-game.test new file mode 100644 index 0000000..0df1615 --- /dev/null +++ b/multiagent/test_cases/q4/7-pacman-game.test @@ -0,0 +1,19 @@ +class: "PacmanGameTreeTest" +alg: "ExpectimaxAgent" +seed: "0" +depth: "2" +max_points: "4" + +# The following specifies the layout to be used +layoutName: "smallClassic" +layout: """ +%%%%%%%%%%%%%%%%%%%% +%......%G G%......% +%.%%...%% %%...%%.% +%.%o.%........%.o%.% +%.%%.%.%%%%%%.%.%%.% +%........P.........% +%%%%%%%%%%%%%%%%%%%% +""" + + diff --git a/multiagent/test_cases/q4/CONFIG b/multiagent/test_cases/q4/CONFIG new file mode 100644 index 0000000..2173a9f --- /dev/null +++ b/multiagent/test_cases/q4/CONFIG @@ -0,0 +1,2 @@ +max_points: "5" +class: "PassAllTestsQuestion" diff --git a/multiagent/test_cases/q5/CONFIG b/multiagent/test_cases/q5/CONFIG new file mode 100644 index 0000000..c127ab3 --- /dev/null +++ b/multiagent/test_cases/q5/CONFIG @@ -0,0 +1,2 @@ +max_points: "6" +class: "PartialCreditQuestion" diff --git a/multiagent/test_cases/q5/grade-agent.solution b/multiagent/test_cases/q5/grade-agent.solution new file mode 100644 index 0000000..fe06fc0 --- /dev/null +++ b/multiagent/test_cases/q5/grade-agent.solution @@ -0,0 +1,2 @@ +# This is the solution file for test_cases/q5/grade-agent.test. +# File intentionally blank. diff --git a/multiagent/test_cases/q5/grade-agent.test b/multiagent/test_cases/q5/grade-agent.test new file mode 100644 index 0000000..1e21bbf --- /dev/null +++ b/multiagent/test_cases/q5/grade-agent.test @@ -0,0 +1,18 @@ +class: "EvalAgentTest" + +agentName: "ExpectimaxAgent" +agentArgs: "evalFn=better" +layoutName: "smallClassic" +maxTime: "120" +numGames: "10" + +nonTimeoutMinimum: "0" +nonTimeoutThresholds: "10" + +scoreThresholds: "500 1000" + +winsMinimum: "1" +winsThresholds: "1 5 10" + +randomSeed: "0" +ghosts: "[RandomGhost(1)]" diff --git a/multiagent/textDisplay.py b/multiagent/textDisplay.py new file mode 100644 index 0000000..3a28080 --- /dev/null +++ b/multiagent/textDisplay.py @@ -0,0 +1,85 @@ +# textDisplay.py +# -------------- +# Licensing Information: You are free to use or extend these projects for +# educational purposes provided that (1) you do not distribute or publish +# solutions, (2) you retain this notice, and (3) you provide clear +# attribution to UC Berkeley, including a link to http://ai.berkeley.edu. +# +# Attribution Information: The Pacman AI projects were developed at UC Berkeley. +# The core projects and autograders were primarily created by John DeNero +# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). +# Student side autograding was added by Brad Miller, Nick Hay, and +# Pieter Abbeel (pabbeel@cs.berkeley.edu). 
+ + +import time +try: + import pacman +except ImportError: + pass + +DRAW_EVERY = 1 +SLEEP_TIME = 0 # This can be overridden by PacmanGraphics.__init__ +DISPLAY_MOVES = False +QUIET = False # Suppresses output + + +class NullGraphics: + def initialize(self, state, isBlue=False): + pass + + def update(self, state): + pass + + def checkNullDisplay(self): + return True + + def pause(self): + time.sleep(SLEEP_TIME) + + def draw(self, state): + print(state) + + def updateDistributions(self, dist): + pass + + def finish(self): + pass + + +class PacmanGraphics: + def __init__(self, speed=None): + if speed is not None: + global SLEEP_TIME + SLEEP_TIME = speed + + def initialize(self, state, isBlue=False): + self.draw(state) + self.pause() + self.turn = 0 + self.agentCounter = 0 + + def update(self, state): + # One call per agent move; a full turn ends when every agent has moved. + numAgents = len(state.agentStates) + self.agentCounter = (self.agentCounter + 1) % numAgents + if self.agentCounter == 0: + self.turn += 1 + if DISPLAY_MOVES: + ghosts = [pacman.nearestPoint( + state.getGhostPosition(i)) for i in range(1, numAgents)] + print("%4d) P: %-8s" % (self.turn, str(pacman.nearestPoint(state.getPacmanPosition()))), + '| Score: %-5d' % state.score, '| Ghosts:', ghosts) + if self.turn % DRAW_EVERY == 0: + self.draw(state) + self.pause() + if state._win or state._lose: + self.draw(state) + + def pause(self): + time.sleep(SLEEP_TIME) + + def draw(self, state): + print(state) + + def finish(self): + pass diff --git a/multiagent/util.py b/multiagent/util.py new file mode 100644 index 0000000..6799299 --- /dev/null +++ b/multiagent/util.py @@ -0,0 +1,717 @@ +# util.py +# ------- +# Licensing Information: You are free to use or extend these projects for +# educational purposes provided that (1) you do not distribute or publish +# solutions, (2) you retain this notice, and (3) you provide clear +# attribution to UC Berkeley, including a link to http://ai.berkeley.edu. +# +# Attribution Information: The Pacman AI projects were developed at UC Berkeley. +# The core projects and autograders were primarily created by John DeNero +# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). +# Student side autograding was added by Brad Miller, Nick Hay, and +# Pieter Abbeel (pabbeel@cs.berkeley.edu).
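+# util.py provides the fixed-seed random source used for reproducible runs
+# and the container classes (Stack, Queue, PriorityQueue) shared by the agents.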
+ + +import sys +import inspect +import heapq +import random +import io + + +class FixedRandom: + def __init__(self): + fixedState = (3, (2147483648, 507801126, 683453281, 310439348, 2597246090, + 2209084787, 2267831527, 979920060, 3098657677, 37650879, 807947081, 3974896263, + 881243242, 3100634921, 1334775171, 3965168385, 746264660, 4074750168, 500078808, + 776561771, 702988163, 1636311725, 2559226045, 157578202, 2498342920, 2794591496, + 4130598723, 496985844, 2944563015, 3731321600, 3514814613, 3362575829, 3038768745, + 2206497038, 1108748846, 1317460727, 3134077628, 988312410, 1674063516, 746456451, + 3958482413, 1857117812, 708750586, 1583423339, 3466495450, 1536929345, 1137240525, + 3875025632, 2466137587, 1235845595, 4214575620, 3792516855, 657994358, 1241843248, + 1695651859, 3678946666, 1929922113, 2351044952, 2317810202, 2039319015, 460787996, + 3654096216, 4068721415, 1814163703, 2904112444, 1386111013, 574629867, 2654529343, + 3833135042, 2725328455, 552431551, 4006991378, 1331562057, 3710134542, 303171486, + 1203231078, 2670768975, 54570816, 2679609001, 578983064, 1271454725, 3230871056, + 2496832891, 2944938195, 1608828728, 367886575, 2544708204, 103775539, 1912402393, + 1098482180, 2738577070, 3091646463, 1505274463, 2079416566, 659100352, 839995305, + 1696257633, 274389836, 3973303017, 671127655, 1061109122, 517486945, 1379749962, + 3421383928, 3116950429, 2165882425, 2346928266, 2892678711, 2936066049, 1316407868, + 2873411858, 4279682888, 2744351923, 3290373816, 1014377279, 955200944, 4220990860, + 2386098930, 1772997650, 3757346974, 1621616438, 2877097197, 442116595, 2010480266, + 2867861469, 2955352695, 605335967, 2222936009, 2067554933, 4129906358, 1519608541, + 1195006590, 1942991038, 2736562236, 279162408, 1415982909, 4099901426, 1732201505, + 2934657937, 860563237, 2479235483, 3081651097, 2244720867, 3112631622, 1636991639, + 3860393305, 2312061927, 48780114, 1149090394, 2643246550, 1764050647, 3836789087, + 3474859076, 4237194338, 1735191073, 2150369208, 92164394, 756974036, 2314453957, + 323969533, 4267621035, 283649842, 810004843, 727855536, 1757827251, 3334960421, + 3261035106, 38417393, 2660980472, 1256633965, 2184045390, 811213141, 2857482069, + 2237770878, 3891003138, 2787806886, 2435192790, 2249324662, 3507764896, 995388363, + 856944153, 619213904, 3233967826, 3703465555, 3286531781, 3863193356, 2992340714, + 413696855, 3865185632, 1704163171, 3043634452, 2225424707, 2199018022, 3506117517, + 3311559776, 3374443561, 1207829628, 668793165, 1822020716, 2082656160, 1160606415, + 3034757648, 741703672, 3094328738, 459332691, 2702383376, 1610239915, 4162939394, + 557861574, 3805706338, 3832520705, 1248934879, 3250424034, 892335058, 74323433, + 3209751608, 3213220797, 3444035873, 3743886725, 1783837251, 610968664, 580745246, + 4041979504, 201684874, 2673219253, 1377283008, 3497299167, 2344209394, 2304982920, + 3081403782, 2599256854, 3184475235, 3373055826, 695186388, 2423332338, 222864327, + 1258227992, 3627871647, 3487724980, 4027953808, 3053320360, 533627073, 3026232514, + 2340271949, 867277230, 868513116, 2158535651, 2487822909, 3428235761, 3067196046, + 3435119657, 1908441839, 788668797, 3367703138, 3317763187, 908264443, 2252100381, + 764223334, 4127108988, 384641349, 3377374722, 1263833251, 1958694944, 3847832657, + 1253909612, 1096494446, 555725445, 2277045895, 3340096504, 1383318686, 4234428127, + 1072582179, 94169494, 1064509968, 2681151917, 2681864920, 734708852, 1338914021, + 1270409500, 1789469116, 4191988204, 1716329784, 2213764829, 3712538840, 
919910444, + 1318414447, 3383806712, 3054941722, 3378649942, 1205735655, 1268136494, 2214009444, + 2532395133, 3232230447, 230294038, 342599089, 772808141, 4096882234, 3146662953, + 2784264306, 1860954704, 2675279609, 2984212876, 2466966981, 2627986059, 2985545332, + 2578042598, 1458940786, 2944243755, 3959506256, 1509151382, 325761900, 942251521, + 4184289782, 2756231555, 3297811774, 1169708099, 3280524138, 3805245319, 3227360276, + 3199632491, 2235795585, 2865407118, 36763651, 2441503575, 3314890374, 1755526087, + 17915536, 1196948233, 949343045, 3815841867, 489007833, 2654997597, 2834744136, + 417688687, 2843220846, 85621843, 747339336, 2043645709, 3520444394, 1825470818, + 647778910, 275904777, 1249389189, 3640887431, 4200779599, 323384601, 3446088641, + 4049835786, 1718989062, 3563787136, 44099190, 3281263107, 22910812, 1826109246, + 745118154, 3392171319, 1571490704, 354891067, 815955642, 1453450421, 940015623, + 796817754, 1260148619, 3898237757, 176670141, 1870249326, 3317738680, 448918002, + 4059166594, 2003827551, 987091377, 224855998, 3520570137, 789522610, 2604445123, + 454472869, 475688926, 2990723466, 523362238, 3897608102, 806637149, 2642229586, + 2928614432, 1564415411, 1691381054, 3816907227, 4082581003, 1895544448, 3728217394, + 3214813157, 4054301607, 1882632454, 2873728645, 3694943071, 1297991732, 2101682438, + 3952579552, 678650400, 1391722293, 478833748, 2976468591, 158586606, 2576499787, + 662690848, 3799889765, 3328894692, 2474578497, 2383901391, 1718193504, 3003184595, + 3630561213, 1929441113, 3848238627, 1594310094, 3040359840, 3051803867, 2462788790, + 954409915, 802581771, 681703307, 545982392, 2738993819, 8025358, 2827719383, + 770471093, 3484895980, 3111306320, 3900000891, 2116916652, 397746721, 2087689510, + 721433935, 1396088885, 2751612384, 1998988613, 2135074843, 2521131298, 707009172, + 2398321482, 688041159, 2264560137, 482388305, 207864885, 3735036991, 3490348331, + 1963642811, 3260224305, 3493564223, 1939428454, 1128799656, 1366012432, 2858822447, + 1428147157, 2261125391, 1611208390, 1134826333, 2374102525, 3833625209, 2266397263, + 3189115077, 770080230, 2674657172, 4280146640, 3604531615, 4235071805, 3436987249, + 509704467, 2582695198, 4256268040, 3391197562, 1460642842, 1617931012, 457825497, + 1031452907, 1330422862, 4125947620, 2280712485, 431892090, 2387410588, 2061126784, + 896457479, 3480499461, 2488196663, 4021103792, 1877063114, 2744470201, 1046140599, + 2129952955, 3583049218, 4217723693, 2720341743, 820661843, 1079873609, 3360954200, + 3652304997, 3335838575, 2178810636, 1908053374, 4026721976, 1793145418, 476541615, + 973420250, 515553040, 919292001, 2601786155, 1685119450, 3030170809, 1590676150, + 1665099167, 651151584, 2077190587, 957892642, 646336572, 2743719258, 866169074, + 851118829, 4225766285, 963748226, 799549420, 1955032629, 799460000, 2425744063, + 2441291571, 1928963772, 528930629, 2591962884, 3495142819, 1896021824, 901320159, + 3181820243, 843061941, 3338628510, 3782438992, 9515330, 1705797226, 953535929, + 764833876, 3202464965, 2970244591, 519154982, 3390617541, 566616744, 3438031503, + 1853838297, 170608755, 1393728434, 676900116, 3184965776, 1843100290, 78995357, + 2227939888, 3460264600, 1745705055, 1474086965, 572796246, 4081303004, 882828851, + 1295445825, 137639900, 3304579600, 2722437017, 4093422709, 273203373, 2666507854, + 3998836510, 493829981, 1623949669, 3482036755, 3390023939, 833233937, 1639668730, + 1499455075, 249728260, 1210694006, 3836497489, 1551488720, 3253074267, 3388238003, + 2372035079, 
3945715164, 2029501215, 3362012634, 2007375355, 4074709820, 631485888, + 3135015769, 4273087084, 3648076204, 2739943601, 1374020358, 1760722448, 3773939706, + 1313027823, 1895251226, 4224465911, 421382535, 1141067370, 3660034846, 3393185650, + 1850995280, 1451917312, 3841455409, 3926840308, 1397397252, 2572864479, 2500171350, + 3119920613, 531400869, 1626487579, 1099320497, 407414753, 2438623324, 99073255, + 3175491512, 656431560, 1153671785, 236307875, 2824738046, 2320621382, 892174056, + 230984053, 719791226, 2718891946, 624), None) + self.random = random.Random() + self.random.setstate(fixedState) + + +""" + Data structures useful for implementing SearchAgents +""" + + +class Stack: + "A container with a last-in-first-out (LIFO) queuing policy." + + def __init__(self): + self.list = [] + + def push(self, item): + "Push 'item' onto the stack" + self.list.append(item) + + def pop(self): + "Pop the most recently pushed item from the stack" + return self.list.pop() + + def isEmpty(self): + "Returns true if the stack is empty" + return len(self.list) == 0 + + +class Queue: + "A container with a first-in-first-out (FIFO) queuing policy." + + def __init__(self): + self.list = [] + + def push(self, item): + "Enqueue the 'item' into the queue" + self.list.insert(0, item) + + def pop(self): + """ + Dequeue the earliest enqueued item still in the queue. This + operation removes the item from the queue. + """ + return self.list.pop() + + def isEmpty(self): + "Returns true if the queue is empty" + return len(self.list) == 0 + + +class PriorityQueue: + """ + Implements a priority queue data structure. Each inserted item + has a priority associated with it and the client is usually interested + in quick retrieval of the lowest-priority item in the queue. This + data structure allows O(1) access to the lowest-priority item. + """ + + def __init__(self): + self.heap = [] + self.count = 0 + + def push(self, item, priority): + entry = (priority, self.count, item) + heapq.heappush(self.heap, entry) + self.count += 1 + + def pop(self): + (_, _, item) = heapq.heappop(self.heap) + return item + + def isEmpty(self): + return len(self.heap) == 0 + + def update(self, item, priority): + # If item already in priority queue with higher priority, update its priority and rebuild the heap. + # If item already in priority queue with equal or lower priority, do nothing. + # If item not in priority queue, do the same thing as self.push. + for index, (p, c, i) in enumerate(self.heap): + if i == item: + if p <= priority: + break + del self.heap[index] + self.heap.append((priority, c, item)) + heapq.heapify(self.heap) + break + else: + self.push(item, priority) + + +class PriorityQueueWithFunction(PriorityQueue): + """ + Implements a priority queue with the same push/pop signature of the + Queue and the Stack classes. This is designed for drop-in replacement for + those two classes. The caller has to provide a priority function, which + extracts each item's priority. 
+
+    """
+
+    def __init__(self, priorityFunction):
+        "priorityFunction (item) -> priority"
+        self.priorityFunction = priorityFunction  # store the priority function
+        PriorityQueue.__init__(self)  # super-class initializer
+
+    def push(self, item):
+        "Adds an item to the queue with priority from the priority function"
+        PriorityQueue.push(self, item, self.priorityFunction(item))
+
+
+def manhattanDistance(xy1, xy2):
+    "Returns the Manhattan distance between points xy1 and xy2"
+    return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])
+
+
+"""
+Data structures and functions useful for various course projects
+
+The search project should not need anything below this line.
+"""
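+
+# Illustrative usage sketch (not part of the original project) for the queue
+# classes defined above; the items and priorities are made-up examples:
+#
+#     pq = PriorityQueue()
+#     pq.push('plan A', 3)
+#     pq.push('plan B', 1)
+#     pq.update('plan A', 0)   # lowers plan A's priority value, so it pops first
+#     pq.pop()                 # -> 'plan A'
+#
+#     # PriorityQueueWithFunction derives each priority from the item itself:
+#     pqf = PriorityQueueWithFunction(lambda xy: manhattanDistance(xy, (0, 0)))
+#     pqf.push((2, 3))         # stored with priority 5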
+
+
+class Counter(dict):
+    """
+    A counter keeps track of counts for a set of keys.
+
+    The counter class is an extension of the standard python
+    dictionary type.  It is specialized to have number values
+    (integers or floats), and includes a handful of additional
+    functions to ease the task of counting data.  In particular,
+    all keys are defaulted to have value 0.  Using a dictionary:
+
+    a = {}
+    print(a['test'])
+
+    would give an error, while the Counter class analogue:
+
+    >>> a = Counter()
+    >>> print(a['test'])
+    0
+
+    returns the default 0 value. Note that to reference a key
+    that you know is contained in the counter,
+    you can still use the dictionary syntax:
+
+    >>> a = Counter()
+    >>> a['test'] = 2
+    >>> print(a['test'])
+    2
+
+    This is very useful for counting things without initializing their counts,
+    see for example:
+
+    >>> a['blah'] += 1
+    >>> print(a['blah'])
+    1
+
+    The counter also includes additional functionality useful in implementing
+    the classifiers for this assignment.  Two counters can be added,
+    subtracted or multiplied together.  See below for details.  They can
+    also be normalized and their total count and arg max can be extracted.
+    """
+
+    def __getitem__(self, idx):
+        self.setdefault(idx, 0)
+        return dict.__getitem__(self, idx)
+
+    def incrementAll(self, keys, count):
+        """
+        Increments all elements of keys by the same count.
+
+        >>> a = Counter()
+        >>> a.incrementAll(['one','two', 'three'], 1)
+        >>> a['one']
+        1
+        >>> a['two']
+        1
+        """
+        for key in keys:
+            self[key] += count
+
+    def argMax(self):
+        """
+        Returns the key with the highest value.
+        """
+        if len(list(self.keys())) == 0:
+            return None
+        items = list(self.items())
+        values = [x[1] for x in items]
+        maxIndex = values.index(max(values))
+        return items[maxIndex][0]
+
+    def sortedKeys(self):
+        """
+        Returns a list of keys sorted by their values.  Keys
+        with the highest values will appear first.
+
+        >>> a = Counter()
+        >>> a['first'] = -2
+        >>> a['second'] = 4
+        >>> a['third'] = 1
+        >>> a.sortedKeys()
+        ['second', 'third', 'first']
+        """
+        # list.sort() lost its cmp= argument in Python 3; sort by value,
+        # descending, instead of using a comparator function.
+        sortedItems = sorted(self.items(), key=lambda item: item[1], reverse=True)
+        return [x[0] for x in sortedItems]
+
+    def totalCount(self):
+        """
+        Returns the sum of counts for all keys.
+        """
+        return sum(self.values())
+
+    def normalize(self):
+        """
+        Edits the counter such that the total count of all
+        keys sums to 1.  The ratio of counts for all keys
+        will remain the same.  Normalizing an empty Counter
+        (total count 0) leaves it unchanged.
+        """
+        total = float(self.totalCount())
+        if total == 0:
+            return
+        for key in list(self.keys()):
+            self[key] = self[key] / total
+
+    def divideAll(self, divisor):
+        """
+        Divides all counts by divisor
+        """
+        divisor = float(divisor)
+        for key in self:
+            self[key] /= divisor
+
+    def copy(self):
+        """
+        Returns a copy of the counter
+        """
+        return Counter(dict.copy(self))
+
+    def __mul__(self, y):
+        """
+        Multiplying two counters gives the dot product of their vectors where
+        each unique label is a vector element.
+
+        >>> a = Counter()
+        >>> b = Counter()
+        >>> a['first'] = -2
+        >>> a['second'] = 4
+        >>> b['first'] = 3
+        >>> b['second'] = 5
+        >>> a['third'] = 1.5
+        >>> a['fourth'] = 2.5
+        >>> a * b
+        14
+        """
+        dotProduct = 0
+        x = self
+        if len(x) > len(y):
+            x, y = y, x
+        for key in x:
+            if key not in y:
+                continue
+            dotProduct += x[key] * y[key]
+        return dotProduct
+
+    def __radd__(self, y):
+        """
+        Adding another counter to a counter increments the current counter
+        by the values stored in the second counter.  Note that this mutates
+        the counter in place and returns None.
+
+        >>> a = Counter()
+        >>> b = Counter()
+        >>> a['first'] = -2
+        >>> a['second'] = 4
+        >>> b['first'] = 3
+        >>> b['third'] = 1
+        >>> a += b
+        >>> a['first']
+        1
+        """
+        for key, value in list(y.items()):
+            self[key] += value
+
+    def __add__(self, y):
+        """
+        Adding two counters gives a counter with the union of all keys and
+        counts of the second added to counts of the first.
+
+        >>> a = Counter()
+        >>> b = Counter()
+        >>> a['first'] = -2
+        >>> a['second'] = 4
+        >>> b['first'] = 3
+        >>> b['third'] = 1
+        >>> (a + b)['first']
+        1
+        """
+        addend = Counter()
+        for key in self:
+            if key in y:
+                addend[key] = self[key] + y[key]
+            else:
+                addend[key] = self[key]
+        for key in y:
+            if key in self:
+                continue
+            addend[key] = y[key]
+        return addend
+
+    def __sub__(self, y):
+        """
+        Subtracting a counter from another gives a counter with the union of all keys and
+        counts of the second subtracted from counts of the first.
+
+        >>> a = Counter()
+        >>> b = Counter()
+        >>> a['first'] = -2
+        >>> a['second'] = 4
+        >>> b['first'] = 3
+        >>> b['third'] = 1
+        >>> (a - b)['first']
+        -5
+        """
+        difference = Counter()
+        for key in self:
+            if key in y:
+                difference[key] = self[key] - y[key]
+            else:
+                difference[key] = self[key]
+        for key in y:
+            if key in self:
+                continue
+            difference[key] = -1 * y[key]
+        return difference
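+
+# Illustrative usage sketch (not part of the original project) showing the
+# Counter arithmetic defined above; the keys and values are made-up examples:
+#
+#     weights = Counter()
+#     weights['ghost'] = -2.0
+#     weights['food'] = 1.0
+#     features = Counter()
+#     features['ghost'] = 1.0
+#     features['food'] = 3.0
+#     weights * features    # dot product: -2.0*1.0 + 1.0*3.0 = 1.0
+#     features.normalize()  # features now sums to 1: {'ghost': 0.25, 'food': 0.75}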
+
+
+def raiseNotDefined():
+    fileName = inspect.stack()[1][1]
+    line = inspect.stack()[1][2]
+    method = inspect.stack()[1][3]
+
+    print("*** Method not implemented: %s at line %s of %s" %
+          (method, line, fileName))
+    sys.exit(1)
+
+
+def normalize(vectorOrCounter):
+    """
+    Normalize a vector or counter by dividing each value by the sum of all values
+    """
+    normalizedCounter = Counter()
+    if type(vectorOrCounter) == type(normalizedCounter):
+        counter = vectorOrCounter
+        total = float(counter.totalCount())
+        if total == 0:
+            return counter
+        for key in list(counter.keys()):
+            value = counter[key]
+            normalizedCounter[key] = value / total
+        return normalizedCounter
+    else:
+        vector = vectorOrCounter
+        s = float(sum(vector))
+        if s == 0:
+            return vector
+        return [el / s for el in vector]
+
+
+def nSample(distribution, values, n):
+    "Draws n samples from a discrete distribution over the given values."
+    if sum(distribution) != 1:
+        distribution = normalize(distribution)
+    rand = [random.random() for i in range(n)]
+    rand.sort()
+    samples = []
+    samplePos, distPos, cdf = 0, 0, distribution[0]
+    while samplePos < n:
+        if rand[samplePos] < cdf:
+            samplePos += 1
+            samples.append(values[distPos])
+        else:
+            distPos += 1
+            cdf += distribution[distPos]
+    return samples
+
+
+def sample(distribution, values=None):
+    "Draws one sample; accepts a Counter, or a distribution plus a values list."
+    if type(distribution) == Counter:
+        items = sorted(distribution.items())
+        distribution = [i[1] for i in items]
+        values = [i[0] for i in items]
+    if sum(distribution) != 1:
+        distribution = normalize(distribution)
+    choice = random.random()
+    i, total = 0, distribution[0]
+    while choice > total:
+        i += 1
+        total += distribution[i]
+    return values[i]
+
+
+def sampleFromCounter(ctr):
+    items = sorted(ctr.items())
+    return sample([v for k, v in items], [k for k, v in items])
+
+
+def getProbability(value, distribution, values):
+    """
+    Gives the probability of a value under a discrete distribution
+    defined by (distribution, values).
+    """
+    total = 0.0
+    for prob, val in zip(distribution, values):
+        if val == value:
+            total += prob
+    return total
+
+
+def flipCoin(p):
+    "Returns True with probability p."
+    r = random.random()
+    return r < p
+
+
+def chooseFromDistribution(distribution):
+    "Takes either a counter or a list of (prob, key) pairs and samples"
+    if type(distribution) == dict or type(distribution) == Counter:
+        return sample(distribution)
+    r = random.random()
+    base = 0.0
+    for prob, element in distribution:
+        base += prob
+        if r <= base:
+            return element
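+
+# Illustrative usage sketch (not part of the original project) for the
+# sampling helpers above; the distribution is a made-up example:
+#
+#     dist = [0.5, 0.25, 0.25]
+#     vals = ['a', 'b', 'c']
+#     sample(dist, vals)               # -> 'a' about half the time
+#     nSample(dist, vals, 10)          # ten samples drawn in one pass
+#     getProbability('b', dist, vals)  # -> 0.25
+#     flipCoin(0.7)                    # -> True with probability 0.7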
+
+
+def nearestPoint(pos):
+    """
+    Finds the nearest grid point to a position (discretizes).
+    """
+    (current_row, current_col) = pos
+
+    grid_row = int(current_row + 0.5)
+    grid_col = int(current_col + 0.5)
+    return (grid_row, grid_col)
+
+
+def sign(x):
+    """
+    Returns 1 or -1 depending on the sign of x
+    """
+    if x >= 0:
+        return 1
+    else:
+        return -1
+
+
+def arrayInvert(array):
+    """
+    Transposes a matrix stored as a list of lists.  (Despite the name, this
+    is a transpose, not a matrix inverse.)
+    """
+    result = [[] for i in array]
+    for outer in array:
+        for inner in range(len(outer)):
+            result[inner].append(outer[inner])
+    return result
+
+
+def matrixAsList(matrix, value=True):
+    """
+    Turns a matrix into a list of coordinates matching the specified value
+    """
+    rows, cols = len(matrix), len(matrix[0])
+    cells = []
+    for row in range(rows):
+        for col in range(cols):
+            if matrix[row][col] == value:
+                cells.append((row, col))
+    return cells
+
+
+def lookup(name, namespace):
+    """
+    Get a method or class from any imported module from its name.
+    Usage: lookup(functionName, globals())
+    """
+    dots = name.count('.')
+    if dots > 0:
+        moduleName, objName = '.'.join(
+            name.split('.')[:-1]), name.split('.')[-1]
+        module = __import__(moduleName)
+        return getattr(module, objName)
+    else:
+        # collect the module objects in the namespace
+        modules = [obj for obj in list(namespace.values())
+                   if inspect.ismodule(obj)]
+        options = [getattr(module, name)
+                   for module in modules if name in dir(module)]
+        options += [obj[1]
+                    for obj in list(namespace.items()) if obj[0] == name]
+        if len(options) == 1:
+            return options[0]
+        if len(options) > 1:
+            raise Exception('Name conflict for %s' % name)
+        raise Exception('%s not found as a method or class' % name)
+
+
+def pause():
+    """
+    Pauses the output stream awaiting user feedback.
+    """
+    print("")
+    input()
+
+
+# code to handle timeouts
+#
+# FIXME
+# NOTE: TimeoutFunction is NOT reentrant.  Later timeouts will silently
+# disable earlier timeouts.  Could be solved by maintaining a global list
+# of active time outs.  Currently, questions which have test cases calling
+# this have all student code so wrapped.
+#
+import signal
+import time
+
+
+class TimeoutFunctionException(Exception):
+    """Exception to raise on a timeout"""
+    pass
+
+
+class TimeoutFunction:
+    def __init__(self, function, timeout):
+        self.timeout = timeout
+        self.function = function
+
+    def handle_timeout(self, signum, frame):
+        raise TimeoutFunctionException()
+
+    def __call__(self, *args, **keyArgs):
+        # If we have a SIGALRM signal, use it to cause an exception if and
+        # when this function runs too long.  Otherwise check the time taken
+        # after the method has returned, and throw an exception then.
+        if hasattr(signal, 'SIGALRM'):
+            old = signal.signal(signal.SIGALRM, self.handle_timeout)
+            signal.alarm(self.timeout)
+            try:
+                result = self.function(*args, **keyArgs)
+            finally:
+                signal.signal(signal.SIGALRM, old)
+                signal.alarm(0)  # cancel the pending alarm even if the call raised
+        else:
+            startTime = time.time()
+            result = self.function(*args, **keyArgs)
+            timeElapsed = time.time() - startTime
+            if timeElapsed >= self.timeout:
+                self.handle_timeout(None, None)
+        return result
+
+
+_ORIGINAL_STDOUT = None
+_ORIGINAL_STDERR = None
+_MUTED = False
+
+
+class WritableNull:
+    def write(self, string):
+        pass
+
+
+def mutePrint():
+    global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
+    if _MUTED:
+        return
+    _MUTED = True
+
+    _ORIGINAL_STDOUT = sys.stdout
+    # _ORIGINAL_STDERR = sys.stderr
+    sys.stdout = WritableNull()
+    # sys.stderr = WritableNull()
+
+
+def unmutePrint():
+    global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
+    if not _MUTED:
+        return
+    _MUTED = False
+
+    sys.stdout = _ORIGINAL_STDOUT
+    # sys.stderr = _ORIGINAL_STDERR
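+
+# Illustrative usage sketch (not part of the original project) for
+# TimeoutFunction; the wrapped function and limit are made-up examples:
+#
+#     def slowEval(state):
+#         ...                                     # potentially long-running student code
+#
+#     guarded = TimeoutFunction(slowEval, 30)     # 30-second limit
+#     try:
+#         result = guarded(state)
+#     except TimeoutFunctionException:
+#         result = None                           # treat a timeout as a failed evaluation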