From 1a6c9097275fab4af5a1c172d89d83f8d971057c Mon Sep 17 00:00:00 2001 From: RobCTs <146442289+RobCTs@users.noreply.github.com> Date: Sun, 3 Dec 2023 14:58:33 +0100 Subject: [PATCH 01/10] Add files via upload Working Defensive Agent --- myTeam2.py | 338 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 338 insertions(+) create mode 100644 myTeam2.py diff --git a/myTeam2.py b/myTeam2.py new file mode 100644 index 0000000..a51b0e7 --- /dev/null +++ b/myTeam2.py @@ -0,0 +1,338 @@ +# baselineTeam.py +# --------------- +# Licensing Information: You are free to use or extend these projects for +# educational purposes provided that (1) you do not distribute or publish +# solutions, (2) you retain this notice, and (3) you provide clear +# attribution to UC Berkeley, including a link to http://ai.berkeley.edu. +# +# Attribution Information: The Pacman AI projects were developed at UC Berkeley. +# The core projects and autograders were primarily created by John DeNero +# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). +# Student side autograding was added by Brad Miller, Nick Hay, and +# Pieter Abbeel (pabbeel@cs.berkeley.edu). + + +# baselineTeam.py +# --------------- +# Licensing Information: Please do not distribute or publish solutions to this +# project. You are free to use and extend these projects for educational +# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by +# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). +# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html + +import random +import contest.util as util +from sklearn.cluster import DBSCAN +import numpy as np + +from contest.captureAgents import CaptureAgent +from contest.game import Directions +from contest.util import nearestPoint + + +################# +# Team creation # +################# + +def create_team(first_index, second_index, is_red, + first='OffensiveReflexAgent', second='DefensiveReflexAgent', num_training=0): + """ + This function should return a list of two agents that will form the + team, initialized using firstIndex and secondIndex as their agent + index numbers. isRed is True if the red team is being created, and + will be False if the blue team is being created. + + As a potentially helpful development aid, this function can take + additional string-valued keyword arguments ("first" and "second" are + such arguments in the case of this function), which will come from + the --redOpts and --blueOpts command-line arguments to capture.py. + For the nightly contest, however, your team will be created without + any extra arguments, so you should make sure that the default + behavior is what you want for the nightly contest. + """ + return [eval(first)(first_index), eval(second)(second_index)] + + +########## +# Agents # +########## + +class ReflexCaptureAgent(CaptureAgent): + """ + A base class for reflex agents that choose score-maximizing actions + """ + + def __init__(self, index, time_for_computing=.1): + super().__init__(index, time_for_computing) + self.start = None + + def register_initial_state(self, game_state): + self.start = game_state.get_agent_position(self.index) + CaptureAgent.register_initial_state(self, game_state) + + def choose_action(self, game_state): + """ + Picks among the actions with the highest Q(s,a). 
+ """ + actions = game_state.get_legal_actions(self.index) + + # You can profile your evaluation time by uncommenting these lines + # start = time.time() + values = [self.evaluate(game_state, a) for a in actions] + # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start) + + max_value = max(values) + best_actions = [a for a, v in zip(actions, values) if v == max_value] + + food_left = len(self.get_food(game_state).as_list()) + + if food_left <= 2: + best_dist = 9999 + best_action = None + for action in actions: + successor = self.get_successor(game_state, action) + pos2 = successor.get_agent_position(self.index) + dist = self.get_maze_distance(self.start, pos2) + if dist < best_dist: + best_action = action + best_dist = dist + return best_action + + return random.choice(best_actions) + + def get_successor(self, game_state, action): + """ + Finds the next successor which is a grid position (location tuple). + """ + successor = game_state.generate_successor(self.index, action) + pos = successor.get_agent_state(self.index).get_position() + if pos != nearestPoint(pos): + # Only half a grid position was covered + return successor.generate_successor(self.index, action) + else: + return successor + + def evaluate(self, game_state, action): + """ + Computes a linear combination of features and feature weights + """ + features = self.get_features(game_state, action) + weights = self.get_weights(game_state, action) + return features * weights + + def get_features(self, game_state, action): + """ + Returns a counter of features for the state + """ + features = util.Counter() + successor = self.get_successor(game_state, action) + features['successor_score'] = self.get_score(successor) + return features + + def get_weights(self, game_state, action): + """ + Normally, weights do not depend on the game state. They can be either + a counter or a dictionary. + """ + return {'successor_score': 1.0} + + +class OffensiveReflexAgent(ReflexCaptureAgent): + """ + A reflex agent that seeks food. This is an agent + we give you to get an idea of what an offensive agent might look like, + but it is by no means the best or only way to build an offensive agent. + """ + + def get_features(self, game_state, action): + features = util.Counter() + successor = self.get_successor(game_state, action) + food_list = self.get_food(successor).as_list() + features['successor_score'] = -len(food_list) # self.getScore(successor) + + # Compute distance to the nearest food + + if len(food_list) > 0: # This should always be True, but better safe than sorry + my_pos = successor.get_agent_state(self.index).get_position() + min_distance = min([self.get_maze_distance(my_pos, food) for food in food_list]) + features['distance_to_food'] = min_distance + return features + + def get_weights(self, game_state, action): + return {'successor_score': 100, 'distance_to_food': -1} + + +class DefensiveReflexAgent(ReflexCaptureAgent): + """ + A reflex agent that keeps its side Pacman-free. Again, + this is to give you an idea of what a defensive agent + could be like. It is not the best or only way to make + such an agent. 
+ """ + + # inharit features from parents + def __init__(self, *args, **kwargs): + super(DefensiveReflexAgent, self).__init__(*args, **kwargs) + + def get_features(self, game_state, action): + features = util.Counter() + successor = self.get_successor(game_state, action) + + my_state = successor.get_agent_state(self.index) + my_pos = my_state.get_position() + print("Current position:", my_pos) + + # Computes whether we're on defense (1) or offense (0) + features['on_defense'] = 1 + if my_state.is_pacman: features['on_defense'] = 0 + + # Computes distance to invaders we can see + enemies = [successor.get_agent_state(i) for i in self.get_opponents(successor)] + invaders = [a for a in enemies if a.is_pacman and a.get_position() is not None] + features['num_invaders'] = len(invaders) + if len(invaders) > 0: + dists = [self.get_maze_distance(my_pos, a.get_position()) for a in invaders] + features['invader_distance'] = min(dists) + # Implementation + else: + # Patrolling strategy when no invaders are visible + features['patrol_distance'] = self.get_patrol_distance(successor) #changed it from game_state + + # Encoding the actions if we need to use it for rewards + if action == Directions.STOP: features['stop'] = 1 + rev = Directions.REVERSE[game_state.get_agent_state(self.index).configuration.direction] + if action == rev: features['reverse'] = 1 + + return features + + + # Defining patrol points + def get_patrol_points(self, game_state): + """ + Identify dynamic patrol points focusing on areas near remaining food + and the nearest power capsule position. + """ + patrol_points = [] + + food_list = self.get_food(game_state).as_list() + nearest_food_in_cluster = self.cluster_food(game_state, food_list) + patrol_points.append(nearest_food_in_cluster) + + + # Include additional strategic points like the nearest power capsule position + power_capsule_position = self.get_power_capsule_position(game_state) + if power_capsule_position: + patrol_points.append(power_capsule_position) + + return patrol_points + + #patrolling strategies + def get_patrol_distance(self, game_state): + """ + Calculate the average distance to key patrol points. + """ + my_state = game_state.get_agent_state(self.index) + my_pos = my_state.get_position() + print("Current positionPatrol:", my_pos) + + # Define key patrol points (static or dynamically determined) + patrol_points = self.get_patrol_points(game_state) + + # Calculate distances to each patrol point + #distances = [self.get_maze_distance(tuple(my_pos), tuple(point)) for point in patrol_points] # point is a np.array, but it needs to be a tuple + distances = [self.get_maze_distance(tuple(map(int, my_pos)), tuple(map(int, point))) for point in patrol_points] + + # Return the average distance + if distances: + return sum(distances) / len(distances) + else: + return 0 + + + def cluster_food(self, game_state, food_list, eps=3, min_samples=2): + """ + Cluster food pellets using DBSCAN. + + :param food_list: List of food pellet coordinates. + :param eps: The maximum distance between two samples for one to be considered as in the neighborhood of the other. + :param min_samples: The number of samples in a neighborhood for a point to be considered as a core point. + :return: List of clusters with their food pellet coordinates. 
+ """ + # Convert food_list to a numpy array for DBSCAN + food_array = np.array(food_list) + + # Apply DBSCAN clustering + dbscan = DBSCAN(eps=eps, min_samples=min_samples) + dbscan.fit(food_array) + + # Extract clustered food pellets + clusters = [food_array[dbscan.labels_ == label] for label in set(dbscan.labels_) if label != -1] + + if not clusters: + return None + + # Find the largest cluster + largest_cluster = max(clusters, key=len) + + # Get current position of the agent + my_pos = game_state.get_agent_state(self.index).get_position() + + # Find the nearest food in the largest cluster + nearest_food = min(largest_cluster, key=lambda food: self.get_maze_distance(my_pos, tuple(food))) + + return tuple(nearest_food) + + + def get_power_capsule_position(self, game_state): + """ + Find and return the position of the nearest power capsule. + """ + my_state = game_state.get_agent_state(self.index) + my_pos = my_state.get_position() + capsules = game_state.get_capsules() + + if capsules: + return min(capsules, key=lambda pos: self.get_maze_distance(my_pos, pos)) + else: + return None + + + def get_weights(self, game_state, action): + """ + Dynamically adjust weights based on the current game state. + """ + + # Default weights + weights = { + 'num_invaders': -1000, + 'on_defense': 100, + 'invader_distance': -10, + 'stop': -100, + 'reverse': -2, + 'patrol_distance': -5 # Weight for patrol distance + } + + # Adjust weights based on specific game state conditions + my_state = game_state.get_agent_state(self.index) + my_pos = my_state.get_position() + enemies = [game_state.get_agent_state(i) for i in self.get_opponents(game_state)] + invaders = [a for a in enemies if a.is_pacman and a.get_position() is not None] + + # Example: Increase the penalty for stopping if there are invaders close by + if invaders: + closest_invader_distance = min([self.get_maze_distance(my_pos, a.get_position()) for a in invaders]) + if closest_invader_distance < 5: # If an invader is very close + weights['stop'] -= 50 # Increase the penalty for stopping + + # Example: Adjust weights based on the remaining food and moves + remaining_food = len(self.get_food_you_are_defending(game_state).as_list()) + remaining_moves = game_state.data.timeleft + total_moves = 1200 # Total moves before the game ends + + if remaining_food <= 4 or remaining_moves < total_moves / 4: + weights['num_invaders'] *=3 + weights['on_defense'] *= 2 + weights['patrol_distance'] *= 1 + + return weights + \ No newline at end of file From a36f8fd999a9c2158fc8a7ffb93715b2c7c607ed Mon Sep 17 00:00:00 2001 From: RobCTs <146442289+RobCTs@users.noreply.github.com> Date: Sun, 3 Dec 2023 15:02:36 +0100 Subject: [PATCH 02/10] README.md --- README.md | 50 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index f21c1f5..27121bc 100644 --- a/README.md +++ b/README.md @@ -1 +1,49 @@ -# pacman-capture-flag-contest \ No newline at end of file +# pacman-capture-flag-contest + +1 Introduction +The Eutopia Pacman contest is an activity consisting of a multiplayer capture-the- +ag variant of Pacman, +where agents control both Pacman and ghosts in coordinated team-based strategies. Students from dif- +ferent EUTOPIA universities compete with each other through their programmed agents. Currently both +University of Ljubljana and Universitat Pompeu Fabra (UPF) are participating organizations. UPF is also +the tournament organizer, which hosts and run the tournaments in the HDTIC cluster1. 
+The project is based on the material from the CS188 course Introduction to Arti cial Intelligence at +Berkeley2, which was extended for the AI course in 2017 by lecturer Prof. Sebastian Sardina at the Royal +Melbourne Institute of Technology (RMIT University) and Dr. Nir Lipovetzky at University of Melbourne +(UoM)3. UPF has refactored the RMIT and UoM code. All the source code is written in Python. + +2 Rules of Pacman Capture the Flag +2.1 Layout +The Pacman map is now divided into two halves: blue (right) and red (left). Red agents (which all have +even indices) must defend the red food while trying to eat the blue food. When on the red side, a red +agent is a ghost. When crossing into enemy territory, the agent becomes a Pacman. +2.2 Scoring +As a Pacman eats food dots, those food dots are stored up inside of that Pacman and removed from the +board. When a Pacman returns to his side of the board, he \deposits" the food dots he is carrying, earning +one point per food pellet delivered. Red team scores are positive, while Blue team scores are negative. +If Pacman is eaten by a ghost before reaching his own side of the board, he will explode into a cloud of +food dots that will be deposited back onto the board. +2.3 Eating Pacman +When a Pacman is eaten by an opposing ghost, the Pacman returns to its starting position (as a ghost). +No points are awarded for eating an opponent. +2.4 Power Capsules +If Pacman eats a power capsule, agents on the opposing team become \scared" for the next 40 moves, +or until they are eaten and respawn, whichever comes sooner. Agents that are \scared" are susceptible +while in the form of ghosts (i.e. while on their own team's side) to being eaten by Pacman. Speci cally, +if Pacman collides with a \scared" ghost, Pacman is una ected and the ghost respawns at its starting +position (no longer in the \scared" state). +2.5 Observations +Agents can only observe an opponent's con guration (position and direction) if they or their teammate is +within 5 squares (Manhattan distance). In addition, an agent always gets a noisy distance reading for each +agent on the board, which can be used to approximately locate unobserved opponents. +2.6 Winning +A game ends when one team returns all but two of the opponents' dots. Games are also limited to 1200 +agent moves (300 moves per each of the four agents). If this move limit is reached, whichever team has +returned the most food wins. If the score is zero (i.e., tied) this is recorded as a tie game. +2.7 Computation Time +We will run your submissions on the UPF cluster, SNOW. Tournaments will generate many processes that +have to be executed without overloading the system. Therefore, each agent has 1 second to return each +action. Each move which does not return within one second will incur a warning. After three warnings, or +any single move taking more than 3 seconds, the game is forfeit. There will be an initial start-up allowance +of 15 seconds (use the registerInitialState function). If your agent times out or otherwise throws an +exception, an error message will be present in the log les, which you can download from the results page. 
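Rule 2.5 above is the constraint that most directly shapes agent code: an opponent's exact configuration is only available inside the 5-square Manhattan radius, and everything else has to come from the noisy distance readings. The sketch below is an editorial illustration of that split, not part of any patch in this series; `get_agent_distances` (for the noisy readings) and `get_team` (for our own agent indices) are assumed placeholder names rather than confirmed parts of the contest API.

```python
# Sketch only: separating exact sightings (rule 2.5's 5-square Manhattan radius)
# from the noisy distance readings. Placeholder/assumed API:
# game_state.get_agent_distances() and self.get_team(game_state).

def manhattan(a, b):
    return abs(a[0] - b[0]) + abs(a[1] - b[1])


class ObservationMixin:
    """Meant to be mixed into a CaptureAgent subclass."""

    def sighted_opponents(self, game_state):
        # Opponents whose exact position the engine currently reveals to us.
        ours = [game_state.get_agent_position(i) for i in self.get_team(game_state)]
        sighted = []
        for opp in self.get_opponents(game_state):
            pos = game_state.get_agent_position(opp)  # None when out of range
            if pos is not None and any(manhattan(pos, mine) <= 5 for mine in ours):
                sighted.append((opp, pos))
        return sighted

    def opponent_information(self, game_state):
        # Prefer exact sightings; otherwise fall back to the noisy readings.
        sighted = self.sighted_opponents(game_state)
        if sighted:
            return {'exact': sighted}
        noisy = game_state.get_agent_distances()  # assumed accessor for the noisy readings
        return {'noisy': {opp: noisy[opp] for opp in self.get_opponents(game_state)}}
```

The explicit Manhattan check is largely redundant with the engine's own masking (out-of-range positions already come back as `None`, as the defensive agent's `get_position() is not None` filter relies on), but keeping it makes the 5-square rule visible in the code.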
From 13d10746d070b0968b413ffe41b0aa30f0a53ade Mon Sep 17 00:00:00 2001 From: RobCTs <146442289+RobCTs@users.noreply.github.com> Date: Sun, 3 Dec 2023 15:05:10 +0100 Subject: [PATCH 03/10] README.md --- README.md | 68 ++++++++++++++++++++----------------------------------- 1 file changed, 25 insertions(+), 43 deletions(-) diff --git a/README.md b/README.md index 27121bc..3e44f8d 100644 --- a/README.md +++ b/README.md @@ -1,49 +1,31 @@ # pacman-capture-flag-contest 1 Introduction -The Eutopia Pacman contest is an activity consisting of a multiplayer capture-the- -ag variant of Pacman, -where agents control both Pacman and ghosts in coordinated team-based strategies. Students from dif- -ferent EUTOPIA universities compete with each other through their programmed agents. Currently both -University of Ljubljana and Universitat Pompeu Fabra (UPF) are participating organizations. UPF is also -the tournament organizer, which hosts and run the tournaments in the HDTIC cluster1. -The project is based on the material from the CS188 course Introduction to Arti cial Intelligence at -Berkeley2, which was extended for the AI course in 2017 by lecturer Prof. Sebastian Sardina at the Royal -Melbourne Institute of Technology (RMIT University) and Dr. Nir Lipovetzky at University of Melbourne + +The Eutopia Pacman contest is an activity consisting of a multiplayer capture-the-flag variant of Pacman, where agents control both Pacman and ghosts in coordinated team-based strategies. Students from different EUTOPIA universities compete with each other through their programmed agents. +The project is based on the material from the CS188 course Introduction to Arti cial Intelligence at Berkeley2, which was extended for the AI course in 2017 by lecturer Prof. Sebastian Sardina at the Royal Melbourne Institute of Technology (RMIT University) and Dr. Nir Lipovetzky at University of Melbourne (UoM)3. UPF has refactored the RMIT and UoM code. All the source code is written in Python. 2 Rules of Pacman Capture the Flag -2.1 Layout -The Pacman map is now divided into two halves: blue (right) and red (left). Red agents (which all have -even indices) must defend the red food while trying to eat the blue food. When on the red side, a red -agent is a ghost. When crossing into enemy territory, the agent becomes a Pacman. -2.2 Scoring -As a Pacman eats food dots, those food dots are stored up inside of that Pacman and removed from the -board. When a Pacman returns to his side of the board, he \deposits" the food dots he is carrying, earning -one point per food pellet delivered. Red team scores are positive, while Blue team scores are negative. -If Pacman is eaten by a ghost before reaching his own side of the board, he will explode into a cloud of -food dots that will be deposited back onto the board. -2.3 Eating Pacman -When a Pacman is eaten by an opposing ghost, the Pacman returns to its starting position (as a ghost). -No points are awarded for eating an opponent. -2.4 Power Capsules -If Pacman eats a power capsule, agents on the opposing team become \scared" for the next 40 moves, -or until they are eaten and respawn, whichever comes sooner. Agents that are \scared" are susceptible -while in the form of ghosts (i.e. while on their own team's side) to being eaten by Pacman. Speci cally, -if Pacman collides with a \scared" ghost, Pacman is una ected and the ghost respawns at its starting -position (no longer in the \scared" state). 
-2.5 Observations -Agents can only observe an opponent's con guration (position and direction) if they or their teammate is -within 5 squares (Manhattan distance). In addition, an agent always gets a noisy distance reading for each -agent on the board, which can be used to approximately locate unobserved opponents. -2.6 Winning -A game ends when one team returns all but two of the opponents' dots. Games are also limited to 1200 -agent moves (300 moves per each of the four agents). If this move limit is reached, whichever team has -returned the most food wins. If the score is zero (i.e., tied) this is recorded as a tie game. -2.7 Computation Time -We will run your submissions on the UPF cluster, SNOW. Tournaments will generate many processes that -have to be executed without overloading the system. Therefore, each agent has 1 second to return each -action. Each move which does not return within one second will incur a warning. After three warnings, or -any single move taking more than 3 seconds, the game is forfeit. There will be an initial start-up allowance -of 15 seconds (use the registerInitialState function). If your agent times out or otherwise throws an -exception, an error message will be present in the log les, which you can download from the results page. + +2.1 Layout: +The Pacman map is now divided into two halves: blue (right) and red (left). Red agents (which all have even indices) must defend the red food while trying to eat the blue food. When on the red side, a red agent is a ghost. When crossing into enemy territory, the agent becomes a Pacman. + +2.2 Scoring: +As a Pacman eats food dots, those food dots are stored up inside of that Pacman and removed from the board. When a Pacman returns to his side of the board, he \deposits" the food dots he is carrying, earning one point per food pellet delivered. Red team scores are positive, while Blue team scores are negative. +If Pacman is eaten by a ghost before reaching his own side of the board, he will explode into a cloud of food dots that will be deposited back onto the board. + +2.3 Eating Pacman: +When a Pacman is eaten by an opposing ghost, the Pacman returns to its starting position (as a ghost). No points are awarded for eating an opponent. + +2.4 Power Capsules: +If Pacman eats a power capsule, agents on the opposing team become \scared" for the next 40 moves, or until they are eaten and respawn, whichever comes sooner. Agents that are \scared" are susceptible while in the form of ghosts (i.e. while on their own team's side) to being eaten by Pacman. Speci cally, if Pacman collides with a \scared" ghost, Pacman is una ected and the ghost respawns at its starting position (no longer in the \scared" state). + +2.5 Observations: +Agents can only observe an opponent's con guration (position and direction) if they or their teammate is within 5 squares (Manhattan distance). In addition, an agent always gets a noisy distance reading for each agent on the board, which can be used to approximately locate unobserved opponents. + +2.6 Winning: +A game ends when one team returns all but two of the opponents' dots. Games are also limited to 1200 agent moves (300 moves per each of the four agents). If this move limit is reached, whichever team has returned the most food wins. If the score is zero (i.e., tied) this is recorded as a tie game. + +2.7 Computation Time: +We will run your submissions on the UPF cluster, SNOW. Tournaments will generate many processes that have to be executed without overloading the system. 
Therefore, each agent has 1 second to return each action. Each move which does not return within one second will incur a warning. After three warnings, or any single move taking more than 3 seconds, the game is forfeit. There will be an initial start-up allowance of 15 seconds (use the registerInitialState function). If your agent times out or otherwise throws an exception, an error message will be present in the log les, which you can download from the results page. From 206b40b04f392c5c219343660911ac2de85b9ee8 Mon Sep 17 00:00:00 2001 From: Nazanin Date: Mon, 4 Dec 2023 05:43:19 +0100 Subject: [PATCH 04/10] merged branches --- myTeam2.py | 153 ++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 146 insertions(+), 7 deletions(-) diff --git a/myTeam2.py b/myTeam2.py index 597f5d5..1967c65 100644 --- a/myTeam2.py +++ b/myTeam2.py @@ -22,6 +22,8 @@ import random import contest.util as util +from sklearn.cluster import DBSCAN +import numpy as np from contest.captureAgents import CaptureAgent from contest.game import Directions @@ -33,7 +35,7 @@ ################# def create_team(first_index, second_index, is_red, - first='OffensiveGoodAgent', second='DefensiveGoodAgent', num_training=0): + first='OffensiveGoodAgent', second='DefensiveReflexAgent', num_training=0): """ This function should return a list of two agents that will form the team, initialized using firstIndex and secondIndex as their agent @@ -57,15 +59,13 @@ def create_team(first_index, second_index, is_red, class GoodCaptureAgent(CaptureAgent): """ - A base class for agents that choose score-maximizing actions + A base class for reflex agents that choose score-maximizing actions """ def __init__(self, index, time_for_computing=.1): super().__init__(index, time_for_computing) self.start = None - #self.numCarrying = 0 - def register_initial_state(self, game_state): self.start = game_state.get_agent_position(self.index) CaptureAgent.register_initial_state(self, game_state) @@ -317,20 +317,25 @@ def get_weights(self, game_state, action, numCarrying): return {'successor_score': 10, 'distance_to_food': -2, 'distance_to_capsule': -1, 'distance_to_opponent': 1, 'home': -1, 'num_carrying': 5} -class DefensiveGoodAgent(GoodCaptureAgent): +class DefensiveReflexAgent(GoodCaptureAgent): """ - An agent that keeps its side Pacman-free. Again, + A reflex agent that keeps its side Pacman-free. Again, this is to give you an idea of what a defensive agent could be like. It is not the best or only way to make such an agent. 
""" + # inharit features from parents + def __init__(self, *args, **kwargs): + super(DefensiveReflexAgent, self).__init__(*args, **kwargs) + def get_features(self, game_state, action): features = util.Counter() successor = self.get_successor(game_state, action) my_state = successor.get_agent_state(self.index) my_pos = my_state.get_position() + print("Current position:", my_pos) # Computes whether we're on defense (1) or offense (0) features['on_defense'] = 1 @@ -343,12 +348,146 @@ def get_features(self, game_state, action): if len(invaders) > 0: dists = [self.get_maze_distance(my_pos, a.get_position()) for a in invaders] features['invader_distance'] = min(dists) + # Implementation + else: + # Patrolling strategy when no invaders are visible + features['patrol_distance'] = self.get_patrol_distance(successor) #changed it from game_state + # Encoding the actions if we need to use it for rewards if action == Directions.STOP: features['stop'] = 1 rev = Directions.REVERSE[game_state.get_agent_state(self.index).configuration.direction] if action == rev: features['reverse'] = 1 return features + + + # Defining patrol points + def get_patrol_points(self, game_state): + """ + Identify dynamic patrol points focusing on areas near remaining food + and the nearest power capsule position. + """ + patrol_points = [] + + food_list = self.get_food(game_state).as_list() + nearest_food_in_cluster = self.cluster_food(game_state, food_list) + patrol_points.append(nearest_food_in_cluster) + + + # Include additional strategic points like the nearest power capsule position + power_capsule_position = self.get_power_capsule_position(game_state) + if power_capsule_position: + patrol_points.append(power_capsule_position) + + return patrol_points + + #patrolling strategies + def get_patrol_distance(self, game_state): + """ + Calculate the average distance to key patrol points. + """ + my_state = game_state.get_agent_state(self.index) + my_pos = my_state.get_position() + print("Current positionPatrol:", my_pos) + + # Define key patrol points (static or dynamically determined) + patrol_points = self.get_patrol_points(game_state) + + # Calculate distances to each patrol point + #distances = [self.get_maze_distance(tuple(my_pos), tuple(point)) for point in patrol_points] # point is a np.array, but it needs to be a tuple + distances = [self.get_maze_distance(tuple(map(int, my_pos)), tuple(map(int, point))) for point in patrol_points] + + # Return the average distance + if distances: + return sum(distances) / len(distances) + else: + return 0 + + + def cluster_food(self, game_state, food_list, eps=3, min_samples=2): + """ + Cluster food pellets using DBSCAN. + + :param food_list: List of food pellet coordinates. + :param eps: The maximum distance between two samples for one to be considered as in the neighborhood of the other. + :param min_samples: The number of samples in a neighborhood for a point to be considered as a core point. + :return: List of clusters with their food pellet coordinates. 
+ """ + # Convert food_list to a numpy array for DBSCAN + food_array = np.array(food_list) + + # Apply DBSCAN clustering + dbscan = DBSCAN(eps=eps, min_samples=min_samples) + dbscan.fit(food_array) + + # Extract clustered food pellets + clusters = [food_array[dbscan.labels_ == label] for label in set(dbscan.labels_) if label != -1] + + if not clusters: + return None + + # Find the largest cluster + largest_cluster = max(clusters, key=len) + + # Get current position of the agent + my_pos = game_state.get_agent_state(self.index).get_position() + + # Find the nearest food in the largest cluster + nearest_food = min(largest_cluster, key=lambda food: self.get_maze_distance(my_pos, tuple(food))) + + return tuple(nearest_food) + + + def get_power_capsule_position(self, game_state): + """ + Find and return the position of the nearest power capsule. + """ + my_state = game_state.get_agent_state(self.index) + my_pos = my_state.get_position() + capsules = game_state.get_capsules() + + if capsules: + return min(capsules, key=lambda pos: self.get_maze_distance(my_pos, pos)) + else: + return None + def get_weights(self, game_state, action): - return {'num_invaders': -1000, 'on_defense': 100, 'invader_distance': -10, 'stop': -100, 'reverse': -2} + """ + Dynamically adjust weights based on the current game state. + """ + + # Default weights + weights = { + 'num_invaders': -1000, + 'on_defense': 100, + 'invader_distance': -10, + 'stop': -100, + 'reverse': -2, + 'patrol_distance': -5 # Weight for patrol distance + } + + # Adjust weights based on specific game state conditions + my_state = game_state.get_agent_state(self.index) + my_pos = my_state.get_position() + enemies = [game_state.get_agent_state(i) for i in self.get_opponents(game_state)] + invaders = [a for a in enemies if a.is_pacman and a.get_position() is not None] + + # Example: Increase the penalty for stopping if there are invaders close by + if invaders: + closest_invader_distance = min([self.get_maze_distance(my_pos, a.get_position()) for a in invaders]) + if closest_invader_distance < 5: # If an invader is very close + weights['stop'] -= 50 # Increase the penalty for stopping + + # Example: Adjust weights based on the remaining food and moves + remaining_food = len(self.get_food_you_are_defending(game_state).as_list()) + remaining_moves = game_state.data.timeleft + total_moves = 1200 # Total moves before the game ends + + if remaining_food <= 4 or remaining_moves < total_moves / 4: + weights['num_invaders'] *=3 + weights['on_defense'] *= 2 + weights['patrol_distance'] *= 1 + + return weights + \ No newline at end of file From 2facdfd261bc7f08f8e52fc6683551d3eac91c47 Mon Sep 17 00:00:00 2001 From: Nazanin Date: Mon, 4 Dec 2023 06:07:54 +0100 Subject: [PATCH 05/10] final commit for monday --- myTeam2.py | 342 +---------------------------------------------------- 1 file changed, 3 insertions(+), 339 deletions(-) diff --git a/myTeam2.py b/myTeam2.py index b290e81..1cfb56c 100644 --- a/myTeam2.py +++ b/myTeam2.py @@ -189,7 +189,7 @@ def choose_action(self, game_state): v = self.MCTS(game_state, a, 5, 0, numCarrying) values.append(v) maxValue = max(values) - print(maxValue) + #print(maxValue) bestActions = [a for a, v in zip(actions, values) if v == maxValue] return bestActions[0] @@ -314,347 +314,11 @@ def get_weights(self, game_state, action, numCarrying): return {'successor_score': 0, 'distance_to_food': 0, 'distance_to_capsule': 0, 'distance_to_opponent': 4, 'home': -10, 'num_carrying': 0} #TODO: change priority to getting away from ghost if 
chased (increase distance_to_opponent weight compared to food) - return {'successor_score': 10, 'distance_to_food': -2, 'distance_to_capsule': -1, 'distance_to_opponent': 1, 'home': -1, + return {'successor_score': 10, 'distance_to_food': -2, 'distance_to_capsule': -1, 'distance_to_opponent': 2, 'home': -1, 'num_carrying': 5} -class DefensiveReflexAgent(GoodCaptureAgent): - """ - A reflex agent that keeps its side Pacman-free. Again, - this is to give you an idea of what a defensive agent - could be like. It is not the best or only way to make - such an agent. - """ - - # inharit features from parents - def __init__(self, *args, **kwargs): - super(DefensiveReflexAgent, self).__init__(*args, **kwargs) - - def get_features(self, game_state, action): - features = util.Counter() - successor = self.get_successor(game_state, action) - - my_state = successor.get_agent_state(self.index) - my_pos = my_state.get_position() - print("Current position:", my_pos) - - # Computes whether we're on defense (1) or offense (0) - features['on_defense'] = 1 - if my_state.is_pacman: features['on_defense'] = 0 - - # Computes distance to invaders we can see - enemies = [successor.get_agent_state(i) for i in self.get_opponents(successor)] - invaders = [a for a in enemies if a.is_pacman and a.get_position() is not None] - features['num_invaders'] = len(invaders) - if len(invaders) > 0: - dists = [self.get_maze_distance(my_pos, a.get_position()) for a in invaders] - features['invader_distance'] = min(dists) - # Implementation - else: - # Patrolling strategy when no invaders are visible - features['patrol_distance'] = self.get_patrol_distance(successor) #changed it from game_state - - # Encoding the actions if we need to use it for rewards - if action == Directions.STOP: features['stop'] = 1 - rev = Directions.REVERSE[game_state.get_agent_state(self.index).configuration.direction] - if action == rev: features['reverse'] = 1 - - return features - - - # Defining patrol points - def get_patrol_points(self, game_state): - """ - Identify dynamic patrol points focusing on areas near remaining food - and the nearest power capsule position. - """ - patrol_points = [] - - food_list = self.get_food(game_state).as_list() - nearest_food_in_cluster = self.cluster_food(game_state, food_list) - patrol_points.append(nearest_food_in_cluster) - - - # Include additional strategic points like the nearest power capsule position - power_capsule_position = self.get_power_capsule_position(game_state) - if power_capsule_position: - patrol_points.append(power_capsule_position) - - return patrol_points - - #patrolling strategies - def get_patrol_distance(self, game_state): - """ - Calculate the average distance to key patrol points. - """ - my_state = game_state.get_agent_state(self.index) - my_pos = my_state.get_position() - print("Current positionPatrol:", my_pos) - - # Define key patrol points (static or dynamically determined) - patrol_points = self.get_patrol_points(game_state) - - # Calculate distances to each patrol point - #distances = [self.get_maze_distance(tuple(my_pos), tuple(point)) for point in patrol_points] # point is a np.array, but it needs to be a tuple - distances = [self.get_maze_distance(tuple(map(int, my_pos)), tuple(map(int, point))) for point in patrol_points] - - # Return the average distance - if distances: - return sum(distances) / len(distances) - else: - return 0 - - - def cluster_food(self, game_state, food_list, eps=3, min_samples=2): - """ - Cluster food pellets using DBSCAN. 
- - :param food_list: List of food pellet coordinates. - :param eps: The maximum distance between two samples for one to be considered as in the neighborhood of the other. - :param min_samples: The number of samples in a neighborhood for a point to be considered as a core point. - :return: List of clusters with their food pellet coordinates. - """ - # Convert food_list to a numpy array for DBSCAN - food_array = np.array(food_list) - - # Apply DBSCAN clustering - dbscan = DBSCAN(eps=eps, min_samples=min_samples) - dbscan.fit(food_array) - - # Extract clustered food pellets - clusters = [food_array[dbscan.labels_ == label] for label in set(dbscan.labels_) if label != -1] - - if not clusters: - return None - - # Find the largest cluster - largest_cluster = max(clusters, key=len) - - # Get current position of the agent - my_pos = game_state.get_agent_state(self.index).get_position() - - # Find the nearest food in the largest cluster - nearest_food = min(largest_cluster, key=lambda food: self.get_maze_distance(my_pos, tuple(food))) - - return tuple(nearest_food) - - - def get_power_capsule_position(self, game_state): - """ - Find and return the position of the nearest power capsule. - """ - my_state = game_state.get_agent_state(self.index) - my_pos = my_state.get_position() - capsules = game_state.get_capsules() - - if capsules: - return min(capsules, key=lambda pos: self.get_maze_distance(my_pos, pos)) - else: - return None - - - def get_weights(self, game_state, action): - """ - Dynamically adjust weights based on the current game state. - """ - - # Default weights - weights = { - 'num_invaders': -1000, - 'on_defense': 100, - 'invader_distance': -10, - 'stop': -100, - 'reverse': -2, - 'patrol_distance': -5 # Weight for patrol distance - } - - # Adjust weights based on specific game state conditions - my_state = game_state.get_agent_state(self.index) - my_pos = my_state.get_position() - enemies = [game_state.get_agent_state(i) for i in self.get_opponents(game_state)] - invaders = [a for a in enemies if a.is_pacman and a.get_position() is not None] - - # Example: Increase the penalty for stopping if there are invaders close by - if invaders: - closest_invader_distance = min([self.get_maze_distance(my_pos, a.get_position()) for a in invaders]) - if closest_invader_distance < 5: # If an invader is very close - weights['stop'] -= 50 # Increase the penalty for stopping - - # Example: Adjust weights based on the remaining food and moves - remaining_food = len(self.get_food_you_are_defending(game_state).as_list()) - remaining_moves = game_state.data.timeleft - total_moves = 1200 # Total moves before the game ends - if remaining_food <= 4 or remaining_moves < total_moves / 4: - weights['num_invaders'] *=3 - weights['on_defense'] *= 2 - weights['patrol_distance'] *= 1 - - return weights - # baselineTeam.py -# --------------- -# Licensing Information: You are free to use or extend these projects for -# educational purposes provided that (1) you do not distribute or publish -# solutions, (2) you retain this notice, and (3) you provide clear -# attribution to UC Berkeley, including a link to http://ai.berkeley.edu. -# -# Attribution Information: The Pacman AI projects were developed at UC Berkeley. -# The core projects and autograders were primarily created by John DeNero -# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). -# Student side autograding was added by Brad Miller, Nick Hay, and -# Pieter Abbeel (pabbeel@cs.berkeley.edu). 
- - -# baselineTeam.py -# --------------- -# Licensing Information: Please do not distribute or publish solutions to this -# project. You are free to use and extend these projects for educational -# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by -# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). -# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html - -import random -import contest.util as util -from sklearn.cluster import DBSCAN -import numpy as np - -from contest.captureAgents import CaptureAgent -from contest.game import Directions -from contest.util import nearestPoint - - -################# -# Team creation # -################# - -def create_team(first_index, second_index, is_red, - first='OffensiveReflexAgent', second='DefensiveReflexAgent', num_training=0): - """ - This function should return a list of two agents that will form the - team, initialized using firstIndex and secondIndex as their agent - index numbers. isRed is True if the red team is being created, and - will be False if the blue team is being created. - - As a potentially helpful development aid, this function can take - additional string-valued keyword arguments ("first" and "second" are - such arguments in the case of this function), which will come from - the --redOpts and --blueOpts command-line arguments to capture.py. - For the nightly contest, however, your team will be created without - any extra arguments, so you should make sure that the default - behavior is what you want for the nightly contest. - """ - return [eval(first)(first_index), eval(second)(second_index)] - - -########## -# Agents # -########## - -class ReflexCaptureAgent(CaptureAgent): - """ - A base class for reflex agents that choose score-maximizing actions - """ - - def __init__(self, index, time_for_computing=.1): - super().__init__(index, time_for_computing) - self.start = None - - def register_initial_state(self, game_state): - self.start = game_state.get_agent_position(self.index) - CaptureAgent.register_initial_state(self, game_state) - - def choose_action(self, game_state): - """ - Picks among the actions with the highest Q(s,a). - """ - actions = game_state.get_legal_actions(self.index) - - # You can profile your evaluation time by uncommenting these lines - # start = time.time() - values = [self.evaluate(game_state, a) for a in actions] - # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start) - - max_value = max(values) - best_actions = [a for a, v in zip(actions, values) if v == max_value] - - food_left = len(self.get_food(game_state).as_list()) - - if food_left <= 2: - best_dist = 9999 - best_action = None - for action in actions: - successor = self.get_successor(game_state, action) - pos2 = successor.get_agent_position(self.index) - dist = self.get_maze_distance(self.start, pos2) - if dist < best_dist: - best_action = action - best_dist = dist - return best_action - - return random.choice(best_actions) - - def get_successor(self, game_state, action): - """ - Finds the next successor which is a grid position (location tuple). 
- """ - successor = game_state.generate_successor(self.index, action) - pos = successor.get_agent_state(self.index).get_position() - if pos != nearestPoint(pos): - # Only half a grid position was covered - return successor.generate_successor(self.index, action) - else: - return successor - - def evaluate(self, game_state, action): - """ - Computes a linear combination of features and feature weights - """ - features = self.get_features(game_state, action) - weights = self.get_weights(game_state, action) - return features * weights - - def get_features(self, game_state, action): - """ - Returns a counter of features for the state - """ - features = util.Counter() - successor = self.get_successor(game_state, action) - features['successor_score'] = self.get_score(successor) - return features - - def get_weights(self, game_state, action): - """ - Normally, weights do not depend on the game state. They can be either - a counter or a dictionary. - """ - return {'successor_score': 1.0} - - -class OffensiveReflexAgent(ReflexCaptureAgent): - """ - A reflex agent that seeks food. This is an agent - we give you to get an idea of what an offensive agent might look like, - but it is by no means the best or only way to build an offensive agent. - """ - - def get_features(self, game_state, action): - features = util.Counter() - successor = self.get_successor(game_state, action) - food_list = self.get_food(successor).as_list() - features['successor_score'] = -len(food_list) # self.getScore(successor) - - # Compute distance to the nearest food - - if len(food_list) > 0: # This should always be True, but better safe than sorry - my_pos = successor.get_agent_state(self.index).get_position() - min_distance = min([self.get_maze_distance(my_pos, food) for food in food_list]) - features['distance_to_food'] = min_distance - return features - - def get_weights(self, game_state, action): - return {'successor_score': 100, 'distance_to_food': -1} - - -class DefensiveReflexAgent(ReflexCaptureAgent): +class DefensiveReflexAgent(GoodCaptureAgent): """ A reflex agent that keeps its side Pacman-free. 
Again, this is to give you an idea of what a defensive agent From 768ac7169af2b656c62097fd526c12998cbb4ee7 Mon Sep 17 00:00:00 2001 From: RobCTs <146442289+RobCTs@users.noreply.github.com> Date: Mon, 4 Dec 2023 09:19:45 +0100 Subject: [PATCH 06/10] myTeam2.py The final version --- myTeam2.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/myTeam2.py b/myTeam2.py index 1cfb56c..14c45ea 100644 --- a/myTeam2.py +++ b/myTeam2.py @@ -35,7 +35,7 @@ ################# def create_team(first_index, second_index, is_red, - first='OffensiveGoodAgent', second='DefensiveReflexAgent', num_training=0): + first='OffensiveGoodAgent', second='DefensiveGoodAgent', num_training=0): """ This function should return a list of two agents that will form the team, initialized using firstIndex and secondIndex as their agent @@ -59,13 +59,15 @@ def create_team(first_index, second_index, is_red, class GoodCaptureAgent(CaptureAgent): """ - A base class for reflex agents that choose score-maximizing actions + A base class for agents that choose score-maximizing actions """ def __init__(self, index, time_for_computing=.1): super().__init__(index, time_for_computing) self.start = None + #self.numCarrying = 0 + def register_initial_state(self, game_state): self.start = game_state.get_agent_position(self.index) CaptureAgent.register_initial_state(self, game_state) @@ -314,11 +316,10 @@ def get_weights(self, game_state, action, numCarrying): return {'successor_score': 0, 'distance_to_food': 0, 'distance_to_capsule': 0, 'distance_to_opponent': 4, 'home': -10, 'num_carrying': 0} #TODO: change priority to getting away from ghost if chased (increase distance_to_opponent weight compared to food) - return {'successor_score': 10, 'distance_to_food': -2, 'distance_to_capsule': -1, 'distance_to_opponent': 2, 'home': -1, + return {'successor_score': 10, 'distance_to_food': -2, 'distance_to_capsule': -1, 'distance_to_opponent': 1, 'home': -1, 'num_carrying': 5} - -class DefensiveReflexAgent(GoodCaptureAgent): +class DefensiveGoodAgent(GoodCaptureAgent): """ A reflex agent that keeps its side Pacman-free. 
Again, this is to give you an idea of what a defensive agent @@ -328,7 +329,7 @@ class DefensiveReflexAgent(GoodCaptureAgent): # inharit features from parents def __init__(self, *args, **kwargs): - super(DefensiveReflexAgent, self).__init__(*args, **kwargs) + super(GoodCaptureAgent, self).__init__(*args, **kwargs) def get_features(self, game_state, action): features = util.Counter() @@ -336,7 +337,7 @@ def get_features(self, game_state, action): my_state = successor.get_agent_state(self.index) my_pos = my_state.get_position() - print("Current position:", my_pos) + #print("Current position:", my_pos) # Computes whether we're on defense (1) or offense (0) features['on_defense'] = 1 @@ -389,7 +390,7 @@ def get_patrol_distance(self, game_state): """ my_state = game_state.get_agent_state(self.index) my_pos = my_state.get_position() - print("Current positionPatrol:", my_pos) + #print("Current positionPatrol:", my_pos) # Define key patrol points (static or dynamically determined) patrol_points = self.get_patrol_points(game_state) @@ -491,4 +492,4 @@ def get_weights(self, game_state, action): weights['patrol_distance'] *= 1 return weights - \ No newline at end of file + From c4ab1dd35257c79a3c37606d88e15827005d4381 Mon Sep 17 00:00:00 2001 From: RobCTs <146442289+RobCTs@users.noreply.github.com> Date: Tue, 5 Dec 2023 19:48:19 +0100 Subject: [PATCH 07/10] myTeam2.py --- myTeam2.py | 1 + 1 file changed, 1 insertion(+) diff --git a/myTeam2.py b/myTeam2.py index 14c45ea..38c05c4 100644 --- a/myTeam2.py +++ b/myTeam2.py @@ -22,6 +22,7 @@ import random import contest.util as util +import sklearn from sklearn.cluster import DBSCAN import numpy as np From 389f9de84aef0e293bdcb84322416614ff6b9222 Mon Sep 17 00:00:00 2001 From: RobCTs <146442289+RobCTs@users.noreply.github.com> Date: Tue, 5 Dec 2023 20:03:58 +0100 Subject: [PATCH 08/10] myTeam2.py --- myTeam2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/myTeam2.py b/myTeam2.py index 38c05c4..9708a10 100644 --- a/myTeam2.py +++ b/myTeam2.py @@ -22,7 +22,7 @@ import random import contest.util as util -import sklearn +import scikit-learn from sklearn.cluster import DBSCAN import numpy as np From 2d87f39f295adce82fd03bdc05e55b0df077806f Mon Sep 17 00:00:00 2001 From: RobCTs <146442289+RobCTs@users.noreply.github.com> Date: Thu, 7 Dec 2023 11:08:50 +0100 Subject: [PATCH 09/10] myTeam2.py --- myTeam2.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/myTeam2.py b/myTeam2.py index 9708a10..fcaf3b7 100644 --- a/myTeam2.py +++ b/myTeam2.py @@ -22,7 +22,8 @@ import random import contest.util as util -import scikit-learn +import os +os.system('pip install scikit-learn') from sklearn.cluster import DBSCAN import numpy as np From 8e3ad5010cd8606011c2012e4bf9ba68aa020a54 Mon Sep 17 00:00:00 2001 From: RobCTs <146442289+RobCTs@users.noreply.github.com> Date: Fri, 21 Jun 2024 12:42:09 +0200 Subject: [PATCH 10/10] Update README.md --- README.md | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 3e44f8d..6493dd7 100644 --- a/README.md +++ b/README.md @@ -1,31 +1,40 @@ -# pacman-capture-flag-contest +# Pacman-capture-flag-contest +Attack and Defense agent for the Pacman intrauniversities contest +(Best ranking: 11th place out of 32) -1 Introduction +## Team + +| **Name | **GitHub** | +| :---: | :---: | +| `Roberta` | [![name](https://github.com/b-rbmp/NexxGate/blob/main/docs/logos/github.png)](https://github.com/RobCTs) | +| `Nazanin` | 
[![name](https://github.com/b-rbmp/NexxGate/blob/main/docs/logos/github.png)](https://github.com/Naominickels) |
+
+### Introduction
+
The Eutopia Pacman contest is an activity consisting of a multiplayer capture-the-flag variant of Pacman, where agents control both Pacman and ghosts in coordinated team-based strategies. Students from different EUTOPIA universities compete with each other through their programmed agents.
The project is based on the material from the CS188 course Introduction to Artificial Intelligence at Berkeley, which was extended for the AI course in 2017 by lecturer Prof. Sebastian Sardina at the Royal Melbourne Institute of Technology (RMIT University) and Dr. Nir Lipovetzky at University of Melbourne (UoM). UPF has refactored the RMIT and UoM code. All the source code is written in Python.

-2 Rules of Pacman Capture the Flag
+### Rules of Pacman Capture the Flag

-2.1 Layout:
+**2.1 Layout**:
The Pacman map is now divided into two halves: blue (right) and red (left). Red agents (which all have even indices) must defend the red food while trying to eat the blue food. When on the red side, a red agent is a ghost. When crossing into enemy territory, the agent becomes a Pacman.

-2.2 Scoring:
+**2.2 Scoring:**
As a Pacman eats food dots, those food dots are stored up inside of that Pacman and removed from the board. When a Pacman returns to his side of the board, he "deposits" the food dots he is carrying, earning one point per food pellet delivered. Red team scores are positive, while Blue team scores are negative.
If Pacman is eaten by a ghost before reaching his own side of the board, he will explode into a cloud of food dots that will be deposited back onto the board.

-2.3 Eating Pacman:
+**2.3 Eating Pacman:**
When a Pacman is eaten by an opposing ghost, the Pacman returns to its starting position (as a ghost). No points are awarded for eating an opponent.

-2.4 Power Capsules:
+**2.4 Power Capsules:**
If Pacman eats a power capsule, agents on the opposing team become "scared" for the next 40 moves, or until they are eaten and respawn, whichever comes sooner. Agents that are "scared" are susceptible while in the form of ghosts (i.e. while on their own team's side) to being eaten by Pacman. Specifically, if Pacman collides with a "scared" ghost, Pacman is unaffected and the ghost respawns at its starting position (no longer in the "scared" state).

-2.5 Observations:
+**2.5 Observations:**
Agents can only observe an opponent's configuration (position and direction) if they or their teammate is within 5 squares (Manhattan distance). In addition, an agent always gets a noisy distance reading for each agent on the board, which can be used to approximately locate unobserved opponents.

-2.6 Winning:
+**2.6 Winning:**
A game ends when one team returns all but two of the opponents' dots. Games are also limited to 1200 agent moves (300 moves per each of the four agents). If this move limit is reached, whichever team has returned the most food wins. If the score is zero (i.e., tied) this is recorded as a tie game.

-2.7 Computation Time:
+**2.7 Computation Time:**
We will run your submissions on the UPF cluster, SNOW. Tournaments will generate many processes that have to be executed without overloading the system. Therefore, each agent has 1 second to return each action. Each move which does not return within one second will incur a warning. After three warnings, or any single move taking more than 3 seconds, the game is forfeit.
There will be an initial start-up allowance of 15 seconds (use the registerInitialState function). If your agent times out or otherwise throws an exception, an error message will be present in the log files, which you can download from the results page.
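Since each move has to come back within one second while `register_initial_state` gets the 15-second start-up allowance, heavyweight work belongs in that set-up call. Below is a minimal sketch of that pattern, added here for illustration only: it assumes `game_state.get_walls()` exposes the wall grid (with `width`/`height`) and that `CaptureAgent` sets a `self.red` flag during registration, as in the Berkeley code this contest is derived from, so treat those names as placeholders.

```python
# Sketch: spend the 15-second start-up allowance on precomputation so that
# choose_action stays well inside the 1-second per-move budget.
# Assumed/placeholder API: game_state.get_walls() and the self.red flag.

from contest.captureAgents import CaptureAgent
from contest.util import nearestPoint


class PrecomputingAgent(CaptureAgent):
    def register_initial_state(self, game_state):
        CaptureAgent.register_initial_state(self, game_state)  # also builds the maze distancer
        walls = game_state.get_walls()  # assumed accessor
        border_x = walls.width // 2 - 1 if self.red else walls.width // 2
        # Open cells on our side of the border: cheap retreat/patrol targets later on.
        self.border_cells = [(border_x, y) for y in range(walls.height)
                             if not walls[border_x][y]]

    def choose_action(self, game_state):
        # Per-move work is limited to distancer lookups over the cached cells.
        my_pos = game_state.get_agent_position(self.index)
        home = min(self.border_cells, key=lambda c: self.get_maze_distance(my_pos, c))
        actions = game_state.get_legal_actions(self.index)

        def next_pos(action):
            successor = game_state.generate_successor(self.index, action)
            return nearestPoint(successor.get_agent_state(self.index).get_position())

        # Placeholder policy: greedily step toward the closest border cell.
        return min(actions, key=lambda a: self.get_maze_distance(next_pos(a), home))
```

With the border cells cached at registration time, `choose_action` reduces to a handful of `get_maze_distance` lookups per move, which is the kind of per-move cost that stays clear of the warning threshold.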